diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -7,12 +7,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -49,12 +49,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -63,12 +63,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -77,12 +77,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -91,12 +91,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -105,12 +105,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -119,12 +119,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -133,12 +133,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -147,12 +147,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -161,12 +161,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -175,12 +175,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i8m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -189,12 +189,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i8m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -203,12 +203,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -217,12 +217,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -231,12 +231,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -245,12 +245,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -259,12 +259,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -273,12 +273,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -287,12 +287,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i16m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -301,12 +301,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i16m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -315,12 +315,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i16m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -329,12 +329,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i16m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -343,12 +343,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i16m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -357,12 +357,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i16m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -371,12 +371,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -385,12 +385,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -399,12 +399,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -413,12 +413,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -427,12 +427,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -441,12 +441,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -455,12 +455,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -469,12 +469,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -483,12 +483,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -497,12 +497,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -511,12 +511,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -525,12 +525,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -539,12 +539,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -553,12 +553,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -567,12 +567,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -581,12 +581,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -595,12 +595,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vv_i64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -609,12 +609,12 @@
 
 // CHECK-RV32-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -623,12 +623,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -637,12 +637,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -651,12 +651,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -665,12 +665,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -679,12 +679,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -693,12 +693,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -707,12 +707,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -721,12 +721,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -735,12 +735,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -749,12 +749,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -763,12 +763,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -777,12 +777,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -791,12 +791,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -805,12 +805,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -819,12 +819,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
@@ -834,12 +834,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -848,12 +848,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
@@ -863,12 +863,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -877,12 +877,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -891,12 +891,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -905,12 +905,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -919,12 +919,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -933,12 +933,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -947,12 +947,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -961,12 +961,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -975,12 +975,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -989,12 +989,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
@@ -1004,12 +1004,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -1018,12 +1018,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1032,12 +1032,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1046,12 +1046,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1060,12 +1060,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1074,12 +1074,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1088,12 +1088,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1102,12 +1102,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1116,12 +1116,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1130,12 +1130,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1144,12 +1144,12 @@
 
 // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1158,12
+1158,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1172,12 +1172,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1186,12 +1186,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1228,12 +1228,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1242,12 +1242,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1257,12 +1257,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1272,12 +1272,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1287,12 +1287,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1317,12 +1317,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1332,12 +1332,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1347,12 +1347,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1362,12 +1362,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1377,12 +1377,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1392,12 +1392,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1422,12 +1422,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, 
vint8m8_t maskedoff, @@ -1437,12 +1437,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1452,12 +1452,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1468,12 +1468,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1483,12 +1483,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1514,12 +1514,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1529,12 +1529,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1544,12 +1544,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1559,12 +1559,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1574,12 +1574,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1604,12 +1604,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1619,12 +1619,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1634,12 +1634,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1650,12 +1650,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1665,12 +1665,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1680,12 +1680,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1695,12 +1695,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1710,12 +1710,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1725,12 +1725,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1740,12 +1740,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1755,12 +1755,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1770,12 +1770,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1800,12 +1800,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1815,12 +1815,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1830,12 +1830,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1845,12 +1845,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1860,12 +1860,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vaadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1875,12 +1875,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1890,12 +1890,12 @@ // CHECK-RV32-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1905,12 +1905,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1921,12 +1921,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1936,12 +1936,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1952,12 +1952,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1983,12 +1983,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1998,12 +1998,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2013,12 +2013,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2028,12 +2028,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2043,12 +2043,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2058,12 +2058,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2073,12 +2073,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2088,12 +2088,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2103,12 +2103,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2118,12 +2118,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2134,12 +2134,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2150,12 +2150,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2166,12 +2166,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2182,12 +2182,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2198,12 +2198,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2213,12 +2213,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2229,12 +2229,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2244,12 +2244,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2260,12 +2260,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2291,12 +2291,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t 
maskedoff, @@ -2306,12 +2306,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2322,12 +2322,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2338,12 +2338,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2354,12 +2354,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2369,12 +2369,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2385,12 +2385,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2400,12 +2400,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2416,12 +2416,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2431,12 +2431,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2447,12 +2447,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2462,12 +2462,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2478,12 +2478,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2493,12 +2493,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2509,12 +2509,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2524,12 +2524,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2540,12 +2540,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2555,12 +2555,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -2571,12 +2571,12 @@ // CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, @@ -682,12 +682,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin, @@ -697,12 +697,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, @@ -712,12 +712,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin, @@ -727,12 +727,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, @@ -742,12 +742,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin, @@ -757,12 +757,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin, @@ -772,12 +772,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, @@ -787,12 +787,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin, @@ -802,12 +802,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, @@ -817,12 +817,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin, @@ -832,12 +832,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin, @@ -862,12 +862,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -892,12 +892,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -922,12 +922,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, @@ -937,12 +937,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -952,12 +952,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, @@ 
-967,12 +967,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin, @@ -997,12 +997,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1012,12 +1012,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin, @@ -1027,12 +1027,12 @@ 
// CHECK-RV32-LABEL: @test_vadc_vvm_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1042,12 +1042,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin, @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1072,12 +1072,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, @@ -1087,12 +1087,12 @@ // CHECK-RV32-LABEL: 
@test_vadc_vvm_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, @@ -1102,12 +1102,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, @@ -1117,12 +1117,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, @@ -1132,12 +1132,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2, @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u32m4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, @@ -1162,12 +1162,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin, @@ -1177,12 +1177,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, @@ -1192,12 +1192,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin, @@ -1207,12 +1207,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, @@ -1222,12 +1222,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, @@ -1237,12 +1237,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, @@ -1252,12 +1252,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i32( 
[[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, @@ -1282,12 +1282,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2, @@ -1297,12 +1297,12 @@ // CHECK-RV32-LABEL: @test_vadc_vvm_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vvm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, @@ -1312,12 +1312,12 @@ // CHECK-RV32-LABEL: @test_vadc_vxm_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadc_vxm_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -205,12 +205,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -317,12 +317,12 @@ // 
CHECK-RV32-LABEL: @test_vadd_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -387,12 +387,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -415,12 +415,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -471,12 +471,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -485,12 +485,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -499,12 +499,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -541,12 +541,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -555,12 +555,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -569,12 +569,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -583,12 +583,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -597,12 +597,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -611,12 +611,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -625,12 +625,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -653,12 +653,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -681,12 +681,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -695,12 +695,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -709,12 +709,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -723,12 +723,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -737,12 +737,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -751,12 +751,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -765,12 +765,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -793,12 +793,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -807,12 +807,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -821,12 +821,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -835,12 +835,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -849,12 +849,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -863,12 +863,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -905,12 +905,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -919,12 +919,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -933,12 +933,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -947,12 +947,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -961,12 +961,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -975,12 +975,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -989,12 +989,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1003,12 +1003,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1017,12 +1017,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1031,12 +1031,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1045,12 +1045,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1059,12 +1059,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1073,12 +1073,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1087,12 +1087,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1101,12 +1101,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1115,12 +1115,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1129,12 +1129,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1143,12 +1143,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1157,12 +1157,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1171,12 +1171,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1185,12 +1185,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1199,12 +1199,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1213,12 +1213,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1241,12 +1241,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1255,12 +1255,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1269,12 +1269,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1283,12 +1283,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1297,12 +1297,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1311,12 +1311,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1325,12 +1325,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1353,12 +1353,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1367,12 +1367,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1381,12 +1381,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1395,12 +1395,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1409,12 +1409,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1423,12 +1423,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1437,12 +1437,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1451,12 +1451,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1465,12 +1465,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1479,12 +1479,12 @@ // CHECK-RV32-LABEL: 
@test_vadd_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1493,12 +1493,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1507,12 +1507,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1521,12 +1521,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1535,12 +1535,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1549,12 +1549,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1563,12 +1563,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t 
test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1577,12 +1577,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1591,12 +1591,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1605,12 +1605,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1619,12 +1619,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1633,12 +1633,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1647,12 +1647,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1661,12 +1661,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1675,12 +1675,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1689,12 +1689,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1703,12 +1703,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1717,12 +1717,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1731,12 +1731,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1745,12 +1745,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1759,12 +1759,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1773,12 +1773,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1787,12 +1787,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1801,12 +1801,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1815,12 +1815,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1829,12 +1829,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1843,12 +1843,12 @@ // CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1857,12 +1857,12 @@ // CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1871,12 +1871,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1885,12 +1885,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1899,12 +1899,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1913,12 +1913,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1927,12 +1927,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1941,12 +1941,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1955,12 +1955,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1969,12 +1969,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1983,12 +1983,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1997,12 +1997,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -2011,12 +2011,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -2025,12 +2025,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -2039,12 +2039,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -2053,12 +2053,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -2067,12 +2067,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -2081,12 +2081,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -2095,12 +2095,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
@@ -2109,12 +2109,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -2123,12 +2123,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
@@ -2137,12 +2137,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -2151,12 +2151,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
@@ -2165,12 +2165,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -2179,12 +2179,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
@@ -2193,12 +2193,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -2207,12 +2207,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
@@ -2221,12 +2221,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -2235,12 +2235,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -2249,12 +2249,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -2263,12 +2263,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -2277,12 +2277,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -2291,12 +2291,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -2305,12 +2305,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -2319,12 +2319,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -2333,12 +2333,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -2347,12 +2347,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -2361,12 +2361,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -2375,12 +2375,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -2389,12 +2389,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -2403,12 +2403,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -2417,12 +2417,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -2431,12 +2431,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -2445,12 +2445,12 @@
// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -2459,12 +2459,12 @@
// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoadd.c
@@ -8,13 +8,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -24,13 +24,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -40,13 +40,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -56,13 +56,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -72,13 +72,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoaddei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -88,13 +88,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -104,13 +104,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -120,13 +120,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -136,13 +136,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -152,13 +152,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoaddei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -168,13 +168,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -184,13 +184,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -200,13 +200,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -216,13 +216,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -232,13 +232,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoaddei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -248,13 +248,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -264,13 +264,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -280,13 +280,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -296,13 +296,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -312,13 +312,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoaddei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -328,13 +328,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoaddei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -344,13 +344,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoaddei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
@@ -360,13 +360,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoaddei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
@@ -376,13 +376,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoaddei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
@@ -392,13 +392,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoaddei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -408,13 +408,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoaddei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -424,13 +424,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoaddei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -440,13 +440,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(* [[TMP0]],
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoaddei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -456,13 +456,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoaddei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -472,13 +472,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoaddei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -488,13 +488,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoaddei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -504,13 +504,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoaddei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -520,13 +520,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoaddei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -536,13 +536,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoaddei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -552,13 +552,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoaddei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -568,13 +568,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -584,13 +584,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -600,13 +600,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint32m2_t test_vamoaddei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -616,13 +616,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -632,13 +632,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoaddei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -648,13 +648,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -664,13 +664,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -680,13 +680,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoaddei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -696,13 +696,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -712,13 +712,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoaddei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -728,13 +728,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -744,13 +744,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -760,13 +760,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint32m2_t test_vamoaddei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -776,13 +776,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -792,13 +792,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoaddei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -808,13 +808,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoaddei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -824,13 +824,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoaddei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -840,13 +840,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoaddei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -856,13 +856,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoaddei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -872,13 +872,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -888,13 +888,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -904,13 +904,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -920,13 +920,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei8_v_u64m8 (uint64_t 
*base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -936,13 +936,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -952,13 +952,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -968,13 +968,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -984,13 +984,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1000,13 +1000,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1016,13 +1016,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1032,13 +1032,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1048,13 +1048,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1064,13 +1064,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1080,13 +1080,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei64_v_u64m2 (uint64_t *base, vuint64m2_t 
bindex, vuint64m2_t value, size_t vl) { @@ -1096,13 +1096,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1112,13 +1112,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { @@ -1128,13 +1128,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoaddei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -1144,13 +1144,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -1160,13 +1160,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -1176,13 +1176,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -1192,13 +1192,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoaddei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -1208,13 +1208,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -1224,13 +1224,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -1240,13 +1240,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -1256,13 +1256,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -1272,13 +1272,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoaddei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -1288,13 +1288,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -1304,13 +1304,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -1320,13 +1320,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -1336,13 +1336,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -1352,13 +1352,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoaddei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -1368,13 +1368,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoaddei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -1384,13 +1384,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoaddei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -1400,13 +1400,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoaddei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -1416,13 +1416,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoaddei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -1432,13 +1432,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoaddei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -1448,13 +1448,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoaddei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -1464,13 +1464,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoaddei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
@@ -1480,13 +1480,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoaddei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
@@ -1496,13 +1496,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoaddei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
@@ -1512,13 +1512,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoaddei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -1528,13 +1528,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoaddei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -1544,13 +1544,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoaddei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -1560,13 +1560,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoaddei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
@@ -1576,13 +1576,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoaddei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
@@ -1592,13 +1592,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoaddei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
@@ -1608,13 +1608,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoaddei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
@@ -1624,13 +1624,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoaddei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
@@ -1640,13 +1640,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoaddei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
@@ -1656,13 +1656,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoaddei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
@@ -1672,13 +1672,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoaddei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
@@ -1688,13 +1688,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoaddei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1704,13 +1704,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoaddei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
@@ -1720,13 +1720,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoaddei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
@@ -1736,13 +1736,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoaddei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
@@ -1752,13 +1752,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoaddei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
@@ -1768,13 +1768,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoaddei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1784,13 +1784,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoaddei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
@@ -1800,13 +1800,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoaddei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
@@ -1816,13 +1816,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoaddei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
@@ -1832,13 +1832,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoaddei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
@@ -1848,13 +1848,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoaddei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1864,13 +1864,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoaddei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
@@ -1880,13 +1880,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoaddei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
@@ -1896,13 +1896,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoaddei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
@@ -1912,13 +1912,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei32_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei32_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoaddei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
@@ -1928,13 +1928,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoaddei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1944,13 +1944,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoaddei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
@@ -1960,13 +1960,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoaddei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
@@ -1976,13 +1976,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei64_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei64_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoaddei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
@@ -1992,13 +1992,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoaddei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
@@ -2008,13 +2008,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoaddei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
@@ -2024,13 +2024,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoaddei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
@@ -2040,13 +2040,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei8_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei8_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoaddei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
@@ -2056,13 +2056,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoaddei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
@@ -2072,13 +2072,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoaddei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
@@ -2088,13 +2088,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoaddei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
@@ -2104,13 +2104,13 @@
// CHECK-RV32-LABEL: @test_vamoaddei16_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT:
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -2120,13 +2120,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -2136,13 +2136,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -2152,13 +2152,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -2168,13 +2168,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -2184,13 +2184,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoaddei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -2200,13 +2200,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoaddei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -2216,13 +2216,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoaddei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -2232,13 +2232,13 @@ // CHECK-RV32-LABEL: @test_vamoaddei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoaddei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoaddei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoand.c @@ -8,13 +8,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -24,13 +24,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -40,13 +40,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -56,13 +56,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -72,13 +72,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -88,13 +88,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -104,13 +104,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -120,13 +120,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -136,13 +136,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -152,13 +152,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei16_v_i32m8 (int32_t 
*base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -168,13 +168,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -184,13 +184,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -200,13 +200,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -216,13 +216,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] 
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -232,13 +232,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -248,13 +248,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -264,13 +264,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -280,13 +280,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -296,13 +296,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -312,13 +312,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t 
vl) { @@ -328,13 +328,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -344,13 +344,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -360,13 +360,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -376,13 +376,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -392,13 +392,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -408,13 +408,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -424,13 +424,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -440,13 +440,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -456,13 +456,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -472,13 +472,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -488,13 +488,13 @@ // 
CHECK-RV32-LABEL: @test_vamoandei32_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -504,13 +504,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -520,13 +520,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -536,13 +536,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -552,13 +552,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -568,13 +568,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -584,13 +584,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -600,13 +600,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -616,13 +616,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -632,13 +632,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -648,13 +648,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32mf2( // CHECK-RV32-NEXT: 
entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -664,13 +664,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -680,13 +680,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -696,13 +696,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -712,13 +712,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -728,13 +728,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -744,13 +744,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -760,13 +760,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -776,13 +776,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -792,13 +792,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -808,13 +808,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32mf2( // CHECK-RV32-NEXT: entry: 
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -824,13 +824,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -840,13 +840,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -856,13 +856,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -872,13 +872,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -888,13 +888,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -904,13 +904,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -920,13 +920,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -936,13 +936,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -952,13 +952,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -968,13 +968,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = 
bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -984,13 +984,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1000,13 +1000,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1016,13 +1016,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1032,13 +1032,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1048,13 +1048,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1064,13 +1064,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1080,13 +1080,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1096,13 +1096,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1112,13 +1112,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { @@ -1128,13 +1128,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -1144,13 +1144,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -1160,13 +1160,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -1176,13 +1176,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -1192,13 +1192,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -1208,13 +1208,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -1224,13 +1224,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -1240,13 +1240,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -1256,13 +1256,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -1272,13 +1272,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -1288,13 +1288,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -1304,13 +1304,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -1320,13 +1320,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast 
i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -1336,13 +1336,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -1352,13 +1352,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoandei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -1368,13 +1368,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoandei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -1384,13 +1384,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoandei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -1400,13 +1400,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoandei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -1416,13 +1416,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoandei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -1432,13 +1432,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -1448,13 +1448,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -1464,13 +1464,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1480,13 +1480,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1496,13 +1496,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1512,13 +1512,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1528,13 +1528,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1544,13 +1544,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1560,13 +1560,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1576,13 +1576,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1592,13 +1592,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1608,13 +1608,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1624,13 +1624,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoandei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1640,13 +1640,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoandei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1656,13 +1656,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoandei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1672,13 +1672,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoandei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1688,13 +1688,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1704,13 +1704,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1720,13 +1720,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1736,13 +1736,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1752,13 +1752,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1768,13 +1768,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1784,13 +1784,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1800,13 +1800,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1816,13 +1816,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1832,13 +1832,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1848,13 +1848,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1864,13 +1864,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1880,13 +1880,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1896,13 +1896,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1912,13 +1912,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoandei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1928,13 +1928,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoandei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1944,13 +1944,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] 
to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoandei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1960,13 +1960,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoandei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1976,13 +1976,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoandei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1992,13 +1992,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * 
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -2008,13 +2008,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -2024,13 +2024,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -2040,13 +2040,13 @@ // CHECK-RV32-LABEL: @test_vamoandei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -2056,13 +2056,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -2072,13 +2072,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -2088,13 +2088,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -2104,13 +2104,13 @@ // CHECK-RV32-LABEL: @test_vamoandei16_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -2120,13 +2120,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -2136,13 +2136,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -2152,13 +2152,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -2168,13 +2168,13 @@ // CHECK-RV32-LABEL: @test_vamoandei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -2184,13 +2184,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoandei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -2200,13 +2200,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoandei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -2216,13 +2216,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoandei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -2232,13 +2232,13 @@ // CHECK-RV32-LABEL: @test_vamoandei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoandei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoandei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomax.c @@ -8,13 +8,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei8_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamomaxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -24,13 +24,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamomaxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -40,13 +40,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -56,13 +56,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -72,13 +72,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamomaxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -88,13 +88,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -104,13 +104,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -120,13 +120,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -136,13 +136,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -152,13 +152,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamomaxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -168,13 +168,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -184,13 +184,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -200,13 +200,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -216,13 +216,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -232,13 +232,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamomaxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -248,13 +248,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -264,13 +264,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -280,13 +280,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -296,13 +296,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -312,13 +312,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -328,13 +328,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -344,13 +344,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamomaxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
@@ -360,13 +360,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamomaxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
@@ -376,13 +376,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
@@ -392,13 +392,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -408,13 +408,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamomaxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -424,13 +424,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamomaxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -440,13 +440,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
@@ -456,13 +456,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex,
vint64m2_t value, size_t vl) { @@ -472,13 +472,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamomaxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -488,13 +488,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamomaxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -504,13 +504,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamomaxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -520,13 +520,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vamomax.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamomaxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -536,13 +536,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamomaxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -552,13 +552,13 @@ // CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamomaxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -568,13 +568,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -584,13 +584,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -600,13 +600,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -616,13 +616,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -632,13 +632,13 @@ // 
CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamomaxuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -648,13 +648,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -664,13 +664,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -680,13 +680,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -696,13 +696,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -712,13 +712,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamomaxuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -728,13 +728,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -744,13 +744,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -760,13 +760,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -776,13 +776,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei32_v_u32m4 (uint32_t *base, 
vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -792,13 +792,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamomaxuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -808,13 +808,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamomaxuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -824,13 +824,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamomaxuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -840,13 +840,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamomaxuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -856,13 +856,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -872,13 +872,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -888,13 +888,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to 
* -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -904,13 +904,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -920,13 +920,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -936,13 +936,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t 
test_vamomaxuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -952,13 +952,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -968,13 +968,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -984,13 +984,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1000,13 +1000,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i32(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamomaxuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
@@ -1016,13 +1016,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamomaxuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
@@ -1032,13 +1032,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamomaxuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
@@ -1048,13 +1048,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamomaxuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
@@ -1064,13 +1064,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamomaxuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
@@ -1080,13 +1080,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamomaxuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
@@ -1096,13 +1096,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamomaxuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
@@ -1112,13 +1112,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamomaxuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
@@ -1128,13 +1128,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -1144,13 +1144,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -1160,13 +1160,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -1176,13 +1176,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -1192,13 +1192,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamomaxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -1208,13 +1208,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -1224,13 +1224,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -1240,13 +1240,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -1256,13 +1256,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -1272,13 +1272,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamomaxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -1288,13 +1288,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -1304,13 +1304,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -1320,13 +1320,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -1336,13 +1336,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -1352,13 +1352,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamomaxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -1368,13 +1368,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamomaxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -1384,13 +1384,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamomaxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -1400,13 +1400,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamomaxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -1416,13 +1416,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamomaxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -1432,13 +1432,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -1448,13 +1448,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -1464,13 +1464,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamomaxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
@@ -1480,13 +1480,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei8_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei8_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamomaxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
@@ -1496,13 +1496,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
@@ -1512,13 +1512,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -1528,13 +1528,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamomaxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -1544,13 +1544,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei16_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei16_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamomaxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -1560,13 +1560,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
@@ -1576,13 +1576,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
@@ -1592,13 +1592,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamomaxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
@@ -1608,13 +1608,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei32_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei32_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamomaxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
@@ -1624,13 +1624,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamomaxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
@@ -1640,13 +1640,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamomaxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
@@ -1656,13 +1656,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamomaxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
@@ -1672,13 +1672,13 @@
// CHECK-RV32-LABEL: @test_vamomaxei64_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxei64_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamomaxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
@@ -1688,13 +1688,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamomaxuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1704,13 +1704,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamomaxuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
@@ -1720,13 +1720,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamomaxuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
@@ -1736,13 +1736,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamomaxuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
@@ -1752,13 +1752,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei8_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei8_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamomaxuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
@@ -1768,13 +1768,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamomaxuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1784,13 +1784,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamomaxuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
@@ -1800,13 +1800,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamomaxuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
@@ -1816,13 +1816,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamomaxuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
@@ -1832,13 +1832,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei16_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei16_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamomaxuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
@@ -1848,13 +1848,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamomaxuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1864,13 +1864,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamomaxuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
@@ -1880,13 +1880,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamomaxuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
@@ -1896,13 +1896,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamomaxuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
@@ -1912,13 +1912,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei32_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei32_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamomaxuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
@@ -1928,13 +1928,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamomaxuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
@@ -1944,13 +1944,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamomaxuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
@@ -1960,13 +1960,13 @@
// CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t
test_vamomaxuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1976,13 +1976,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamomaxuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1992,13 +1992,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -2008,13 +2008,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint64m2_t test_vamomaxuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -2024,13 +2024,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -2040,13 +2040,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -2056,13 +2056,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] 
// vuint64m1_t test_vamomaxuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -2072,13 +2072,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -2088,13 +2088,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -2104,13 +2104,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei16_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -2120,13 +2120,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -2136,13 +2136,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -2152,13 +2152,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -2168,13 +2168,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -2184,13 +2184,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamomaxuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -2200,13 +2200,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamomaxuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -2216,13 +2216,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamomaxuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -2232,13 +2232,13 @@ // CHECK-RV32-LABEL: @test_vamomaxuei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamomaxuei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamomaxuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamomin.c
@@ -8,13 +8,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call
@llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -24,13 +24,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -40,13 +40,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -56,13 +56,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -72,13 
+72,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamominei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -88,13 +88,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -104,13 +104,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -120,13 +120,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomin.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -136,13 +136,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -152,13 +152,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamominei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -168,13 +168,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -184,13 +184,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -200,13 +200,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -216,13 +216,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -232,13 +232,13 @@ // 
CHECK-RV32-LABEL: @test_vamominei32_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamominei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -248,13 +248,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamominei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -264,13 +264,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamominei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -280,13 +280,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamomin.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamominei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -296,13 +296,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamominei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -312,13 +312,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -328,13 +328,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -344,13 +344,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -360,13 +360,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -376,13 +376,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -392,13 +392,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i64m2( // 
CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamominei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -408,13 +408,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamominei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -424,13 +424,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamominei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -440,13 +440,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamominei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
@@ -456,13 +456,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamominei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
@@ -472,13 +472,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamominei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
@@ -488,13 +488,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamominei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
@@ -504,13 +504,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamominei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
@@ -520,13 +520,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamominei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
@@ -536,13 +536,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamominei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
@@ -552,13 +552,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamominei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
@@ -568,13 +568,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamominuei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
@@ -584,13 +584,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamominuei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
@@ -600,13 +600,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamominuei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
@@ -616,13 +616,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamominuei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
@@ -632,13 +632,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamominuei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
@@ -648,13 +648,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamominuei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
@@ -664,13 +664,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamominuei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
@@ -680,13 +680,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamominuei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
@@ -696,13 +696,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamominuei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
@@ -712,13 +712,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamominuei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
@@ -728,13 +728,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamominuei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
@@ -744,13 +744,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamominuei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
@@ -760,13 +760,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamominuei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
@@ -776,13 +776,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamominuei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
@@ -792,13 +792,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamominuei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
@@ -808,13 +808,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamominuei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
@@ -824,13 +824,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamominuei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
@@ -840,13 +840,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamominuei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
@@ -856,13 +856,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamominuei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
@@ -872,13 +872,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamominuei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
@@ -888,13 +888,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamominuei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
@@ -904,13 +904,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
@@ -920,13 +920,13 @@
// CHECK-RV32-LABEL: @test_vamominuei8_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
@@ -936,13 +936,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamominuei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
@@ -952,13 +952,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamominuei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
@@ -968,13 +968,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
@@ -984,13 +984,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
@@ -1000,13 +1000,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamominuei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
@@ -1016,13 +1016,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamominuei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
@@ -1032,13 +1032,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
@@ -1048,13 +1048,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
@@ -1064,13 +1064,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamominuei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
@@ -1080,13 +1080,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamominuei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
@@ -1096,13 +1096,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
@@ -1112,13 +1112,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
@@ -1128,13 +1128,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamominei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -1144,13 +1144,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamominei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -1160,13 +1160,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamominei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -1176,13 +1176,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamominei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -1192,13 +1192,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamominei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -1208,13 +1208,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamominei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -1224,13 +1224,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamominei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -1240,13 +1240,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamominei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -1256,13 +1256,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamominei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -1272,13 +1272,13 @@
// CHECK-RV32-LABEL: @test_vamominei16_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei16_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamominei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -1288,13 +1288,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamominei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -1304,13 +1304,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamominei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -1320,13 +1320,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamominei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -1336,13 +1336,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamominei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -1352,13 +1352,13 @@
// CHECK-RV32-LABEL: @test_vamominei32_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamominei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -1368,13 +1368,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamominei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -1384,13 +1384,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamominei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -1400,13 +1400,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamominei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -1416,13 +1416,13 @@
// CHECK-RV32-LABEL: @test_vamominei64_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei64_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamominei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -1432,13 +1432,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamominei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -1448,13 +1448,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamominei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -1464,13 +1464,13 @@
// CHECK-RV32-LABEL: @test_vamominei8_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominei8_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]]
= bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1480,13 +1480,13 @@ // CHECK-RV32-LABEL: @test_vamominei8_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1496,13 +1496,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1512,13 +1512,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1528,13 +1528,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1544,13 +1544,13 @@ // CHECK-RV32-LABEL: @test_vamominei16_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1560,13 +1560,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1576,13 +1576,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1592,13 +1592,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1608,13 +1608,13 @@ // CHECK-RV32-LABEL: @test_vamominei32_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1624,13 +1624,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamominei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1640,13 +1640,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamominei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1656,13 +1656,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamominei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1672,13 +1672,13 @@ // CHECK-RV32-LABEL: @test_vamominei64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamominei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1688,13 +1688,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1704,13 +1704,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1720,13 +1720,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1736,13 +1736,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1752,13 +1752,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamominuei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1768,13 +1768,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1784,13 +1784,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1800,13 +1800,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1816,13 +1816,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1832,13 +1832,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamominuei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1848,13 +1848,13 @@ // CHECK-RV32-LABEL: @test_vamominuei32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1864,13 +1864,13 @@ // CHECK-RV32-LABEL: @test_vamominuei32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1880,13 +1880,13 @@ // CHECK-RV32-LABEL: @test_vamominuei32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1896,13 +1896,13 @@ // CHECK-RV32-LABEL: @test_vamominuei32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m4_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1912,13 +1912,13 @@ // CHECK-RV32-LABEL: @test_vamominuei32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamominuei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1928,13 +1928,13 @@ // CHECK-RV32-LABEL: @test_vamominuei64_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamominuei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1944,13 +1944,13 @@ // CHECK-RV32-LABEL: @test_vamominuei64_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vamominuei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamominuei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1960,13 +1960,13 @@ // CHECK-RV32-LABEL: @test_vamominuei64_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamominuei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1976,13 +1976,13 @@ // CHECK-RV32-LABEL: @test_vamominuei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamominuei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1992,13 +1992,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vamominuei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamominuei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -2008,13 +2008,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamominuei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -2024,13 +2024,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamominuei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -2040,13 +2040,13 @@ // CHECK-RV32-LABEL: @test_vamominuei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vamominuei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamominuei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -2056,13 +2056,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamominuei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -2072,13 +2072,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamominuei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamominuei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -2088,13 +2088,13 @@ // CHECK-RV32-LABEL: @test_vamominuei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret 
[[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
@@ -2104,13 +2104,13 @@
// CHECK-RV32-LABEL: @test_vamominuei16_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei16_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
@@ -2120,13 +2120,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamominuei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
@@ -2136,13 +2136,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamominuei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
@@ -2152,13 +2152,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
@@ -2168,13 +2168,13 @@
// CHECK-RV32-LABEL: @test_vamominuei32_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei32_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
@@ -2184,13 +2184,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamominuei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
@@ -2200,13 +2200,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamominuei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
@@ -2216,13 +2216,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamominuei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
@@ -2232,13 +2232,13 @@
// CHECK-RV32-LABEL: @test_vamominuei64_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamominuei64_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamominuei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoor.c
@@ -8,13 +8,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -24,13 +24,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -40,13 +40,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -56,13 +56,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -72,13 +72,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -88,13 +88,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -104,13 +104,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -120,13 +120,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -136,13 +136,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -152,13 +152,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -168,13 +168,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -184,13 +184,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -200,13 +200,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -216,13 +216,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -232,13 +232,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -248,13 +248,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -264,13 +264,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -280,13 +280,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -296,13 +296,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -312,13 +312,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -328,13 +328,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -344,13 +344,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
@@ -360,13 +360,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
@@ -376,13 +376,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
@@ -392,13 +392,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -408,13 +408,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -424,13 +424,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -440,13 +440,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
@@ -456,13 +456,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
@@ -472,13 +472,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
@@ -488,13 +488,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
@@ -504,13 +504,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
@@ -520,13 +520,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
@@ -536,13 +536,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
@@ -552,13 +552,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
@@ -568,13 +568,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
@@ -584,13 +584,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
@@ -600,13 +600,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
@@ -616,13 +616,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
@@ -632,13 +632,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
@@ -648,13 +648,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
@@ -664,13 +664,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
@@ -680,13 +680,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
@@ -696,13 +696,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
@@ -712,13 +712,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
@@ -728,13 +728,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
@@ -744,13 +744,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
@@ -760,13 +760,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
@@ -776,13 +776,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
@@ -792,13 +792,13 @@
// CHECK-RV32-LABEL: @test_vamoorei32_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
@@ -808,13 +808,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
@@ -824,13 +824,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
@@ -840,13 +840,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
@@ -856,13 +856,13 @@
// CHECK-RV32-LABEL: @test_vamoorei64_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
@@ -872,13 +872,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
@@ -888,13 +888,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
@@ -904,13 +904,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
@@ -920,13 +920,13 @@
// CHECK-RV32-LABEL: @test_vamoorei8_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
@@ -936,13 +936,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
@@ -952,13 +952,13 @@
// CHECK-RV32-LABEL: @test_vamoorei16_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -968,13 +968,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -984,13 +984,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -1000,13 +1000,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -1016,13 +1016,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m2( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -1032,13 +1032,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -1048,13 +1048,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -1064,13 +1064,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -1080,13 +1080,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -1096,13 +1096,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -1112,13 +1112,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { @@ -1128,13 +1128,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -1144,13 +1144,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -1160,13 +1160,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -1176,13 +1176,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -1192,13 +1192,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -1208,13 +1208,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -1224,13 +1224,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -1240,13 +1240,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -1256,13 +1256,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -1272,13 +1272,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -1288,13 +1288,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -1304,13 +1304,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vint32m1_t test_vamoorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -1320,13 +1320,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -1336,13 +1336,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -1352,13 +1352,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t 
test_vamoorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -1368,13 +1368,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -1384,13 +1384,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -1400,13 +1400,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoorei64_v_i32m2_m 
(vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -1416,13 +1416,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -1432,13 +1432,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -1448,13 +1448,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t 
bindex, vint64m2_t value, size_t vl) { @@ -1464,13 +1464,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1480,13 +1480,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1496,13 +1496,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ 
-1512,13 +1512,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1528,13 +1528,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1544,13 +1544,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1560,13 +1560,13 @@ // 
CHECK-RV32-LABEL: @test_vamoorei32_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1576,13 +1576,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1592,13 +1592,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1608,13 +1608,13 @@ // CHECK-RV32-LABEL: 
@test_vamoorei32_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1624,13 +1624,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1640,13 +1640,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1656,13 +1656,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i64m4_m( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1672,13 +1672,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1688,13 +1688,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1704,13 +1704,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u32m1_m( // CHECK-RV32-NEXT: entry: // 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1720,13 +1720,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1736,13 +1736,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1752,13 +1752,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1768,13 +1768,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1784,13 +1784,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1800,13 +1800,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to 
* -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1816,13 +1816,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1832,13 +1832,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1848,13 +1848,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1864,13 +1864,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1880,13 +1880,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1896,13 +1896,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1912,13 +1912,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1928,13 +1928,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1944,13 +1944,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] 
= call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1960,13 +1960,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1976,13 +1976,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1992,13 +1992,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -2008,13 +2008,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -2024,13 +2024,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -2040,13 +2040,13 @@ // CHECK-RV32-LABEL: @test_vamoorei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -2056,13 +2056,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -2072,13 +2072,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -2088,13 +2088,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -2104,13 +2104,13 @@ // CHECK-RV32-LABEL: @test_vamoorei16_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -2120,13 +2120,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -2136,13 +2136,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -2152,13 +2152,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -2168,13 +2168,13 @@ // CHECK-RV32-LABEL: @test_vamoorei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -2184,13 +2184,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -2200,13 +2200,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -2216,13 +2216,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoorei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -2232,13 +2232,13 @@ // CHECK-RV32-LABEL: @test_vamoorei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] 
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoorei64_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
//
vuint64m8_t test_vamoorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoswap.c
@@ -10,13 +10,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
//
vint32mf2_t test_vamoswapei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex,
@@ -27,13 +27,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i32> [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
//
vint32m1_t test_vamoswapei8_v_i32m1(int32_t *base, vuint8mf4_t bindex,
@@ -44,13 +44,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i32> [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call
@llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, @@ -61,13 +61,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei8_v_i32m4(int32_t *base, vuint8m1_t bindex, @@ -78,13 +78,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei8_v_i32m8(int32_t *base, vuint8m2_t bindex, @@ -95,13 +95,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, @@ -112,13 +112,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, @@ -129,13 +129,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei16_v_i32m2(int32_t *base, vuint16m1_t bindex, @@ -146,13 +146,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei16_v_i32m4(int32_t *base, vuint16m2_t bindex, @@ -163,13 +163,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei16_v_i32m8(int32_t *base, vuint16m4_t bindex, @@ -180,13 +180,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex, @@ -197,13 +197,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei32_v_i32m1(int32_t *base, vuint32m1_t bindex, @@ -214,13 +214,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei32_v_i32m2(int32_t *base, vuint32m2_t bindex, @@ -231,13 +231,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei32_v_i32m4(int32_t *base, vuint32m4_t bindex, @@ -248,13 +248,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei32_v_i32m8(int32_t *base, vuint32m8_t bindex, @@ -265,13 +265,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, @@ -282,13 +282,13 @@ // CHECK-RV32-LABEL: 
@test_vamoswapei64_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei64_v_i32m1(int32_t *base, vuint64m2_t bindex, @@ -299,13 +299,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei64_v_i32m2(int32_t *base, vuint64m4_t bindex, @@ -316,13 +316,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei64_v_i32m4(int32_t *base, vuint64m8_t bindex, @@ -333,13 +333,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, @@ -350,13 +350,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, @@ -367,13 +367,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, @@ -384,13 +384,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vint64m8_t test_vamoswapei8_v_i64m8(int64_t *base, vuint8m1_t bindex, @@ -401,13 +401,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, @@ -418,13 +418,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, @@ -435,13 +435,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei16_v_i64m4(int64_t *base, vuint16m1_t bindex, @@ -452,13 +452,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei16_v_i64m8(int64_t *base, vuint16m2_t bindex, @@ -469,13 +469,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, @@ -486,13 +486,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei32_v_i64m2(int64_t *base, vuint32m1_t bindex, @@ -503,13 +503,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei32_v_i64m4(int64_t *base, vuint32m2_t bindex, @@ -520,13 +520,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei32_v_i64m8(int64_t *base, vuint32m4_t bindex, @@ -537,13 +537,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei64_v_i64m1(int64_t *base, vuint64m1_t bindex, @@ -554,13 +554,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei64_v_i64m2(int64_t *base, vuint64m2_t bindex, @@ -571,13 +571,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoswapei64_v_i64m4(int64_t *base, vuint64m4_t bindex, @@ -588,13 +588,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoswapei64_v_i64m8(int64_t *base, vuint64m8_t bindex, @@ -605,13 +605,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, @@ -622,13 +622,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, @@ -639,13 +639,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, @@ -656,13 +656,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, @@ -673,13 +673,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoswapei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, @@ -690,13 +690,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u32mf2( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, @@ -707,13 +707,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, @@ -724,13 +724,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, @@ -741,13 +741,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vamoswapei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, @@ -758,13 +758,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoswapei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, @@ -775,13 +775,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, @@ -792,13 +792,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP1]] // vuint32m1_t test_vamoswapei32_v_u32m1(uint32_t *base, vuint32m1_t bindex, @@ -809,13 +809,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, @@ -826,13 +826,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, @@ -843,13 +843,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoswapei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, @@ -860,13 +860,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoswapei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, @@ -877,13 +877,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoswapei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, @@ -894,13 +894,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoswapei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, @@ -911,13 +911,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoswapei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, @@ -928,13 +928,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, @@ -945,13 +945,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, @@ -962,13 +962,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, @@ -979,13 +979,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, @@ -996,13 +996,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, @@ -1013,13 +1013,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, @@ -1030,13 +1030,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, @@ -1047,13 +1047,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, @@ -1064,13 +1064,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, @@ -1081,13 +1081,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, @@ -1098,13 +1098,13 @@ // 
CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, @@ -1115,13 +1115,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, @@ -1132,13 +1132,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoswapei64_v_u64m1(uint64_t *base, vuint64m1_t bindex, @@ -1149,13 +1149,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoswapei64_v_u64m2(uint64_t *base, vuint64m2_t bindex, @@ -1166,13 +1166,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoswapei64_v_u64m4(uint64_t *base, vuint64m4_t bindex, @@ -1183,13 +1183,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoswapei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, @@ -1200,13 +1200,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei8_v_f32mf2(float *base, vuint8mf8_t bindex, @@ -1217,13 +1217,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei8_v_f32m1(float *base, vuint8mf4_t bindex, @@ -1234,13 +1234,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei8_v_f32m2(float *base, vuint8mf2_t bindex, @@ -1251,13 +1251,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei8_v_f32m4(float *base, vuint8m1_t bindex, @@ -1268,13 +1268,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vamoswapei8_v_f32m8(float *base, vuint8m2_t bindex, @@ -1285,13 +1285,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei16_v_f32mf2(float *base, vuint16mf4_t bindex, @@ -1302,13 +1302,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei16_v_f32m1(float *base, vuint16mf2_t bindex, @@ -1319,13 +1319,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei16_v_f32m2(float *base, vuint16m1_t bindex, @@ -1336,13 +1336,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei16_v_f32m4(float *base, vuint16m2_t bindex, @@ -1353,13 +1353,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vamoswapei16_v_f32m8(float *base, vuint16m4_t bindex, @@ -1370,13 +1370,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei32_v_f32mf2(float *base, vuint32mf2_t bindex, @@ -1387,13 +1387,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m1( // CHECK-RV32-NEXT: entry: // 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei32_v_f32m1(float *base, vuint32m1_t bindex, @@ -1404,13 +1404,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei32_v_f32m2(float *base, vuint32m2_t bindex, @@ -1421,13 +1421,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei32_v_f32m4(float *base, vuint32m4_t bindex, @@ -1438,13 +1438,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vamoswapei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vamoswapei32_v_f32m8(float *base, vuint32m8_t bindex, @@ -1455,13 +1455,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vamoswapei64_v_f32mf2(float *base, vuint64m1_t bindex, @@ -1472,13 +1472,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vamoswapei64_v_f32m1(float *base, vuint64m2_t bindex, @@ -1489,13 +1489,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei64_v_f32m2(float *base, vuint64m4_t bindex, @@ -1506,13 +1506,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei64_v_f32m4(float *base, vuint64m8_t bindex, @@ -1523,13 +1523,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei8_v_f64m1(double *base, vuint8mf8_t bindex, @@ -1540,13 +1540,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei8_v_f64m2(double *base, vuint8mf4_t bindex, @@ -1557,13 +1557,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei8_v_f64m4(double *base, vuint8mf2_t bindex, @@ -1574,13 +1574,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei8_v_f64m8(double *base, vuint8m1_t bindex, @@ -1591,13 +1591,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei16_v_f64m1(double *base, vuint16mf4_t bindex, @@ -1608,13 +1608,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], 
[[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei16_v_f64m2(double *base, vuint16mf2_t bindex, @@ -1625,13 +1625,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei16_v_f64m4(double *base, vuint16m1_t bindex, @@ -1642,13 +1642,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei16_v_f64m8(double *base, vuint16m2_t bindex, @@ -1659,13 +1659,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei32_v_f64m1(double *base, vuint32mf2_t bindex, @@ -1676,13 +1676,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] 
= bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei32_v_f64m2(double *base, vuint32m1_t bindex, @@ -1693,13 +1693,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei32_v_f64m4(double *base, vuint32m2_t bindex, @@ -1710,13 +1710,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei32_v_f64m8(double *base, vuint32m4_t bindex, @@ -1727,13 +1727,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei64_v_f64m1(double *base, vuint64m1_t bindex, @@ -1744,13 +1744,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei64_v_f64m2(double *base, vuint64m2_t bindex, @@ -1761,13 +1761,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei64_v_f64m4(double *base, vuint64m4_t bindex, @@ -1778,13 +1778,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat64m8_t test_vamoswapei64_v_f64m8(double *base, vuint64m8_t bindex, @@ -1795,13 +1795,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei8_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1813,13 +1813,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei8_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -1831,13 +1831,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei8_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -1849,13 +1849,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m4_m( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei8_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -1867,13 +1867,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei8_v_i32m8_m(vbool4_t mask, int32_t *base, @@ -1885,13 +1885,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei16_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1903,13 +1903,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei16_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -1921,13 +1921,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei16_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -1939,13 +1939,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei16_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -1957,13 +1957,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei16_v_i32m8_m(vbool4_t mask, int32_t *base, @@ -1975,13 +1975,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei32_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -1993,13 +1993,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei32_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -2011,13 +2011,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei32_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -2029,13 +2029,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei32_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -2047,13 +2047,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoswapei32_v_i32m8_m(vbool4_t mask, int32_t *base, @@ -2065,13 +2065,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32mf2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoswapei64_v_i32mf2_m(vbool64_t mask, int32_t *base, @@ -2083,13 +2083,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoswapei64_v_i32m1_m(vbool32_t mask, int32_t *base, @@ -2101,13 +2101,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoswapei64_v_i32m2_m(vbool16_t mask, int32_t *base, @@ -2119,13 +2119,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoswapei64_v_i32m4_m(vbool8_t mask, int32_t *base, @@ -2137,13 +2137,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoswapei8_v_i64m1_m(vbool64_t mask, int64_t *base, @@ -2155,13 +2155,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoswapei8_v_i64m2_m(vbool32_t mask, int64_t *base, @@ -2173,13 +2173,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoswapei8_v_i64m4_m(vbool16_t mask, int64_t *base,
@@ -2191,13 +2191,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoswapei8_v_i64m8_m(vbool8_t mask, int64_t *base,
@@ -2209,13 +2209,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoswapei16_v_i64m1_m(vbool64_t mask, int64_t *base,
@@ -2227,13 +2227,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoswapei16_v_i64m2_m(vbool32_t mask, int64_t *base,
@@ -2245,13 +2245,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoswapei16_v_i64m4_m(vbool16_t mask, int64_t *base,
@@ -2263,13 +2263,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoswapei16_v_i64m8_m(vbool8_t mask, int64_t *base,
@@ -2281,13 +2281,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoswapei32_v_i64m1_m(vbool64_t mask, int64_t *base,
@@ -2299,13 +2299,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoswapei32_v_i64m2_m(vbool32_t mask, int64_t *base,
@@ -2317,13 +2317,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoswapei32_v_i64m4_m(vbool16_t mask, int64_t *base,
@@ -2335,13 +2335,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoswapei32_v_i64m8_m(vbool8_t mask, int64_t *base,
@@ -2353,13 +2353,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoswapei64_v_i64m1_m(vbool64_t mask, int64_t *base,
@@ -2371,13 +2371,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoswapei64_v_i64m2_m(vbool32_t mask, int64_t *base,
@@ -2389,13 +2389,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoswapei64_v_i64m4_m(vbool16_t mask, int64_t *base,
@@ -2407,13 +2407,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_i64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoswapei64_v_i64m8_m(vbool8_t mask, int64_t *base,
@@ -2425,13 +2425,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoswapei8_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -2443,13 +2443,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoswapei8_v_u32m1_m(vbool32_t mask, uint32_t *base,
@@ -2461,13 +2461,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoswapei8_v_u32m2_m(vbool16_t mask, uint32_t *base,
@@ -2479,13 +2479,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoswapei8_v_u32m4_m(vbool8_t mask, uint32_t *base,
@@ -2497,13 +2497,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoswapei8_v_u32m8_m(vbool4_t mask, uint32_t *base,
@@ -2515,13 +2515,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoswapei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -2533,13 +2533,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoswapei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
@@ -2551,13 +2551,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoswapei16_v_u32m2_m(vbool16_t mask, uint32_t *base,
@@ -2569,13 +2569,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoswapei16_v_u32m4_m(vbool8_t mask, uint32_t *base,
@@ -2587,13 +2587,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoswapei16_v_u32m8_m(vbool4_t mask, uint32_t *base,
@@ -2605,13 +2605,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoswapei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -2623,13 +2623,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoswapei32_v_u32m1_m(vbool32_t mask, uint32_t *base,
@@ -2641,13 +2641,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoswapei32_v_u32m2_m(vbool16_t mask, uint32_t *base,
@@ -2659,13 +2659,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoswapei32_v_u32m4_m(vbool8_t mask, uint32_t *base,
@@ -2677,13 +2677,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoswapei32_v_u32m8_m(vbool4_t mask, uint32_t *base,
@@ -2695,13 +2695,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoswapei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
@@ -2713,13 +2713,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoswapei64_v_u32m1_m(vbool32_t mask, uint32_t *base,
@@ -2731,13 +2731,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoswapei64_v_u32m2_m(vbool16_t mask, uint32_t *base,
@@ -2749,13 +2749,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoswapei64_v_u32m4_m(vbool8_t mask, uint32_t *base,
@@ -2767,13 +2767,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoswapei8_v_u64m1_m(vbool64_t mask, uint64_t *base,
@@ -2785,13 +2785,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoswapei8_v_u64m2_m(vbool32_t mask, uint64_t *base,
@@ -2803,13 +2803,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoswapei8_v_u64m4_m(vbool16_t mask, uint64_t *base,
@@ -2821,13 +2821,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoswapei8_v_u64m8_m(vbool8_t mask, uint64_t *base,
@@ -2839,13 +2839,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoswapei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
@@ -2857,13 +2857,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoswapei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
@@ -2875,13 +2875,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoswapei16_v_u64m4_m(vbool16_t mask, uint64_t *base,
@@ -2893,13 +2893,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoswapei16_v_u64m8_m(vbool8_t mask, uint64_t *base,
@@ -2911,13 +2911,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoswapei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
@@ -2929,13 +2929,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoswapei32_v_u64m2_m(vbool32_t mask, uint64_t *base,
@@ -2947,13 +2947,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoswapei32_v_u64m4_m(vbool16_t mask, uint64_t *base,
@@ -2965,13 +2965,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoswapei32_v_u64m8_m(vbool8_t mask, uint64_t *base,
@@ -2983,13 +2983,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoswapei64_v_u64m1_m(vbool64_t mask, uint64_t *base,
@@ -3001,13 +3001,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoswapei64_v_u64m2_m(vbool32_t mask, uint64_t *base,
@@ -3019,13 +3019,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoswapei64_v_u64m4_m(vbool16_t mask, uint64_t *base,
@@ -3037,13 +3037,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_u64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoswapei64_v_u64m8_m(vbool8_t mask, uint64_t *base,
@@ -3055,13 +3055,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vamoswapei8_v_f32mf2_m(vbool64_t mask, float *base,
@@ -3073,13 +3073,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vamoswapei8_v_f32m1_m(vbool32_t mask, float *base,
@@ -3091,13 +3091,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vamoswapei8_v_f32m2_m(vbool16_t mask, float *base,
@@ -3109,13 +3109,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vamoswapei8_v_f32m4_m(vbool8_t mask, float *base,
@@ -3127,13 +3127,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei8_v_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei8_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m8_t test_vamoswapei8_v_f32m8_m(vbool4_t mask, float *base,
@@ -3145,13 +3145,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vamoswapei16_v_f32mf2_m(vbool64_t mask, float *base,
@@ -3163,13 +3163,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vamoswapei16_v_f32m1_m(vbool32_t mask, float *base,
@@ -3181,13 +3181,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vamoswapei16_v_f32m2_m(vbool16_t mask, float *base,
@@ -3199,13 +3199,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vamoswapei16_v_f32m4_m(vbool8_t mask, float *base,
@@ -3217,13 +3217,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei16_v_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei16_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m8_t test_vamoswapei16_v_f32m8_m(vbool4_t mask, float *base,
@@ -3235,13 +3235,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vamoswapei32_v_f32mf2_m(vbool64_t mask, float *base,
@@ -3253,13 +3253,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vamoswapei32_v_f32m1_m(vbool32_t mask, float *base,
@@ -3271,13 +3271,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m2_t test_vamoswapei32_v_f32m2_m(vbool16_t mask, float *base,
@@ -3289,13 +3289,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m4_t test_vamoswapei32_v_f32m4_m(vbool8_t mask, float *base,
@@ -3307,13 +3307,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei32_v_f32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei32_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m8_t test_vamoswapei32_v_f32m8_m(vbool4_t mask, float *base,
@@ -3325,13 +3325,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32mf2_t test_vamoswapei64_v_f32mf2_m(vbool64_t mask, float *base,
@@ -3343,13 +3343,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vfloat32m1_t test_vamoswapei64_v_f32m1_m(vbool32_t mask, float *base,
@@ -3361,13 +3361,13 @@
// CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i32(* [[TMP0]],
[[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vamoswapei64_v_f32m2_m(vbool16_t mask, float *base, @@ -3379,13 +3379,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vamoswapei64_v_f32m4_m(vbool8_t mask, float *base, @@ -3397,13 +3397,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei8_v_f64m1_m(vbool64_t mask, double *base, @@ -3415,13 +3415,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei8_v_f64m2_m(vbool32_t mask, double *base, @@ -3433,13 +3433,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei8_v_f64m4_m(vbool16_t mask, double *base, @@ -3451,13 +3451,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei8_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei8_v_f64m8_m(vbool8_t mask, double *base, @@ -3469,13 +3469,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei16_v_f64m1_m(vbool64_t mask, double *base, @@ -3487,13 +3487,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei16_v_f64m2_m(vbool32_t mask, double *base, @@ -3505,13 +3505,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei16_v_f64m4_m(vbool16_t mask, double *base, @@ -3523,13 +3523,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei16_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei16_v_f64m8_m(vbool8_t mask, double *base, @@ -3541,13 +3541,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei32_v_f64m1_m(vbool64_t mask, double *base, @@ -3559,13 +3559,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei32_v_f64m2_m(vbool32_t mask, double *base, @@ -3577,13 +3577,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei32_v_f64m4_m(vbool16_t mask, double *base, @@ -3595,13 +3595,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei32_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei32_v_f64m8_m(vbool8_t mask, double *base, @@ -3613,13 +3613,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vamoswapei64_v_f64m1_m(vbool64_t mask, double *base, @@ -3631,13 +3631,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vamoswapei64_v_f64m2_m(vbool32_t mask, double *base, @@ -3649,13 +3649,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vamoswapei64_v_f64m4_m(vbool16_t mask, double *base, @@ -3667,13 +3667,13 @@ // CHECK-RV32-LABEL: @test_vamoswapei64_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoswapei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vamoswapei64_v_f64m8_m(vbool8_t mask, double *base, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vamoxor.c @@ -8,13 +8,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(* 
[[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoxorei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { @@ -24,13 +24,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoxorei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { @@ -40,13 +40,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { @@ -56,13 +56,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { @@ -72,13 +72,13 @@ // CHECK-RV32-LABEL: 
@test_vamoxorei8_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoxorei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { @@ -88,13 +88,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoxorei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { @@ -104,13 +104,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoxorei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { @@ -120,13 +120,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(* [[TMP0]], 
[[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { @@ -136,13 +136,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { @@ -152,13 +152,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoxorei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { @@ -168,13 +168,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoxorei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { @@ -184,13 +184,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoxorei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { @@ -200,13 +200,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { @@ -216,13 +216,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { @@ -232,13 +232,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m8( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vamoxorei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { @@ -248,13 +248,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vamoxorei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { @@ -264,13 +264,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vamoxorei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { @@ -280,13 +280,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], 
i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vamoxorei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) { @@ -296,13 +296,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vamoxorei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) { @@ -312,13 +312,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) { @@ -328,13 +328,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) { @@ -344,13 +344,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -360,13 +360,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -376,13 +376,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -392,13 +392,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoxorei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
@@ -408,13 +408,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoxorei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
@@ -424,13 +424,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoxorei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
@@ -440,13 +440,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoxorei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
@@ -456,13 +456,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoxorei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
@@ -472,13 +472,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoxorei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
@@ -488,13 +488,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoxorei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
@@ -504,13 +504,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoxorei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
@@ -520,13 +520,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoxorei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
@@ -536,13 +536,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vamoxorei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
@@ -552,13 +552,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vamoxorei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
@@ -568,13 +568,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoxorei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
@@ -584,13 +584,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoxorei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
@@ -600,13 +600,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoxorei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
@@ -616,13 +616,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoxorei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
@@ -632,13 +632,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoxorei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
@@ -648,13 +648,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoxorei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
@@ -664,13 +664,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoxorei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
@@ -680,13 +680,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoxorei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
@@ -696,13 +696,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoxorei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
@@ -712,13 +712,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoxorei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
@@ -728,13 +728,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoxorei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
@@ -744,13 +744,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoxorei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
@@ -760,13 +760,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoxorei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
@@ -776,13 +776,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoxorei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
@@ -792,13 +792,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m8_t test_vamoxorei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
@@ -808,13 +808,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32mf2_t test_vamoxorei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
@@ -824,13 +824,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m1_t test_vamoxorei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
@@ -840,13 +840,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m2_t test_vamoxorei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
@@ -856,13 +856,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint32m4_t test_vamoxorei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
@@ -872,13 +872,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoxorei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
@@ -888,13 +888,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoxorei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
@@ -904,13 +904,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoxorei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
@@ -920,13 +920,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoxorei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
@@ -936,13 +936,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoxorei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
@@ -952,13 +952,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoxorei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
@@ -968,13 +968,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoxorei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
@@ -984,13 +984,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoxorei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
@@ -1000,13 +1000,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoxorei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
@@ -1016,13 +1016,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoxorei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
@@ -1032,13 +1032,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoxorei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
@@ -1048,13 +1048,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoxorei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
@@ -1064,13 +1064,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m1_t test_vamoxorei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
@@ -1080,13 +1080,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m2_t test_vamoxorei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
@@ -1096,13 +1096,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m4_t test_vamoxorei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
@@ -1112,13 +1112,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint64m8_t test_vamoxorei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
@@ -1128,13 +1128,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoxorei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
@@ -1144,13 +1144,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoxorei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
@@ -1160,13 +1160,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoxorei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
@@ -1176,13 +1176,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoxorei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
@@ -1192,13 +1192,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoxorei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
@@ -1208,13 +1208,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoxorei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
@@ -1224,13 +1224,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoxorei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
@@ -1240,13 +1240,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoxorei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
@@ -1256,13 +1256,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoxorei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
@@ -1272,13 +1272,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei16_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei16_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoxorei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
@@ -1288,13 +1288,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoxorei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
@@ -1304,13 +1304,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoxorei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
@@ -1320,13 +1320,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoxorei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
@@ -1336,13 +1336,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoxorei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
@@ -1352,13 +1352,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei32_v_i32m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vamoxorei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
@@ -1368,13 +1368,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32mf2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vamoxorei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
@@ -1384,13 +1384,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vamoxorei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
@@ -1400,13 +1400,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vamoxorei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
@@ -1416,13 +1416,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei64_v_i32m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei64_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vamoxorei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
@@ -1432,13 +1432,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m1_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vamoxorei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
@@ -1448,13 +1448,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vamoxorei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
@@ -1464,13 +1464,13 @@
// CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP1]]
//
// CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
#[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) { @@ -1480,13 +1480,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) { @@ -1496,13 +1496,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) { @@ -1512,13 +1512,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) { @@ -1528,13 +1528,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) { @@ -1544,13 +1544,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) { @@ -1560,13 +1560,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) { @@ -1576,13 +1576,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) { @@ -1592,13 +1592,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) { @@ -1608,13 +1608,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) { @@ -1624,13 +1624,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vamoxorei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) { @@ -1640,13 +1640,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vamoxorei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) { @@ -1656,13 +1656,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vamoxorei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) { @@ -1672,13 +1672,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vamoxorei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) { @@ -1688,13 +1688,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoxorei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { @@ -1704,13 +1704,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: 
ret [[TMP1]] // vuint32m1_t test_vamoxorei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { @@ -1720,13 +1720,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { @@ -1736,13 +1736,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { @@ -1752,13 +1752,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint32m8_t test_vamoxorei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { @@ -1768,13 +1768,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoxorei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { @@ -1784,13 +1784,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { @@ -1800,13 +1800,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] 
// vuint32m2_t test_vamoxorei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { @@ -1816,13 +1816,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { @@ -1832,13 +1832,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoxorei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { @@ -1848,13 +1848,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint32mf2_t test_vamoxorei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { @@ -1864,13 +1864,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { @@ -1880,13 +1880,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { @@ -1896,13 +1896,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: 
ret [[TMP1]] // vuint32m4_t test_vamoxorei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { @@ -1912,13 +1912,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vamoxorei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { @@ -1928,13 +1928,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vamoxorei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { @@ -1944,13 +1944,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vamoxorei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { @@ -1960,13 +1960,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vamoxorei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { @@ -1976,13 +1976,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vamoxorei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { @@ -1992,13 +1992,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { @@ -2008,13 +2008,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { @@ -2024,13 +2024,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { @@ -2040,13 +2040,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m8_t test_vamoxorei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { @@ -2056,13 +2056,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { @@ -2072,13 +2072,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { @@ -2088,13 +2088,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m4_t test_vamoxorei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { @@ -2104,13 +2104,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei16_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoxorei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { @@ -2120,13 +2120,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vamoxorei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { @@ -2136,13 +2136,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m2_t test_vamoxorei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { @@ -2152,13 +2152,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { @@ -2168,13 +2168,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vamoxorei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { @@ -2184,13 +2184,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m1_t test_vamoxorei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { @@ -2200,13 +2200,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vamoxorei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { @@ -2216,13 +2216,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vamoxorei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { @@ -2232,13 +2232,13 @@ // CHECK-RV32-LABEL: @test_vamoxorei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vamoxorei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m8_t test_vamoxorei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t 
test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( 
[[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: 
@test_vand_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: 
@test_vand_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) 
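(The checks around this point cover the vx, vector-scalar, form of the overloaded vand intrinsic at SEW=16, LMUL=2, which is exactly what the nxv8i16.i16 mangling encodes. As a minimal usage sketch of that same form, assuming the overloaded RVV intrinsic API from <riscv_vector.h> that these tests exercise; the helper name and the strip-mining loop are illustrative, not part of the test file:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative helper (not from the test): AND a scalar mask into a buffer in
// vl-sized strips. The vand call is the vand.vx form checked above and lowers
// to @llvm.riscv.vand.nxv8i16.i16.i32 on RV32 or ...i16.i64 on RV64.
void and_mask_u16m2(uint16_t *dst, const uint16_t *src, uint16_t mask, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e16m2(n - i);            // elements handled this strip
    vuint16m2_t v = vle16_v_u16m2(src + i, vl); // unit-stride load
    v = vand(v, mask, vl);                      // element-wise AND with a scalar
    vse16_v_u16m2(dst + i, v, vl);              // unit-stride store
    i += vl;
  }
}

Note that later revisions of the intrinsics spec prefix these names with __riscv_; the unprefixed spellings match the API these tests target.)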
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m2( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vand_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.nxv2i64.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { 
@@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, 
vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vand_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vand_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vasub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32mf2( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vasubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -834,12 +834,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -848,12 +848,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -863,12 +863,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -905,12 +905,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -919,12 +919,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -933,12 +933,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -947,12 +947,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -961,12 +961,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -975,12 +975,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -989,12 +989,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1004,12 +1004,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1018,12 +1018,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1032,12 +1032,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1046,12 +1046,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1060,12 +1060,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1074,12 +1074,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1088,12 +1088,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1102,12 +1102,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1116,12 +1116,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1130,12 +1130,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1144,12 +1144,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1158,12 +1158,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1172,12 +1172,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1186,12 +1186,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1228,12 +1228,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1242,12 +1242,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1257,12 +1257,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1272,12 +1272,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1287,12 +1287,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1317,12 +1317,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1332,12 +1332,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1347,12 +1347,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1362,12 +1362,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1377,12 +1377,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1392,12 +1392,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1422,12 +1422,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1437,12 +1437,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t 
maskedoff, @@ -1452,12 +1452,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1468,12 +1468,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1483,12 +1483,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1514,12 +1514,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1529,12 +1529,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1544,12 +1544,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1559,12 +1559,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1574,12 +1574,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1604,12 +1604,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1619,12 +1619,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1634,12 +1634,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1650,12 +1650,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1665,12 +1665,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1680,12 +1680,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1695,12 +1695,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1710,12 +1710,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1725,12 +1725,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1740,12 +1740,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1755,12 +1755,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1770,12 +1770,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1800,12 +1800,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1815,12 +1815,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1830,12 +1830,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1845,12 +1845,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1860,12 +1860,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1875,12 +1875,12 @@ // CHECK-RV32-LABEL: @test_vasub_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vasub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1890,12 +1890,12 @@ // CHECK-RV32-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1905,12 +1905,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1921,12 +1921,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1936,12 +1936,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1952,12 +1952,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1983,12 +1983,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1998,12 +1998,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2013,12 +2013,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2028,12 +2028,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2043,12 +2043,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2058,12 +2058,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2073,12 +2073,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2088,12 +2088,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2103,12 +2103,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2118,12 +2118,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2134,12 +2134,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2150,12 +2150,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2166,12 +2166,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2182,12 +2182,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2198,12 +2198,12 @@ // CHECK-RV32-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] 
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2213,12 +2213,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2229,12 +2229,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2244,12 +2244,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2260,12 +2260,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2275,12 +2275,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2291,12 +2291,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2306,12 +2306,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2322,12 +2322,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2338,12 +2338,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2354,12 +2354,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -2369,12 +2369,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2385,12 +2385,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -2400,12 +2400,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2416,12 +2416,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -2431,12 +2431,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2447,12 +2447,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -2462,12 +2462,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2478,12 +2478,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -2493,12 +2493,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2509,12 +2509,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -2524,12 +2524,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2540,12 +2540,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2555,12 +2555,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2571,12 +2571,12 @@
// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -77,12 +77,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -91,12 +91,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -105,12 +105,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -119,12 +119,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -133,12 +133,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -147,12 +147,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -161,12 +161,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -175,12 +175,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -203,12 +203,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -217,12 +217,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -231,12 +231,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -245,12 +245,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -259,12 +259,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -273,12 +273,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -287,12 +287,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -301,12 +301,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -315,12 +315,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -329,12 +329,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -343,12 +343,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -357,12 +357,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -371,12 +371,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -385,12 +385,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -399,12 +399,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -413,12 +413,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -427,12 +427,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -441,12 +441,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -455,12 +455,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -469,12 +469,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -483,12 +483,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -497,12 +497,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -511,12 +511,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -525,12 +525,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -539,12 +539,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -553,12 +553,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -567,12 +567,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -581,12 +581,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -595,12 +595,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -609,12 +609,12 @@
// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
@@ -623,12 +623,12 @@
// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: 
@test_vdiv_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t 
test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 
@@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vdivu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: 
@test_vdivu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -23,12 +23,12 @@ // 
CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -205,12 +205,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( // 
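// NOTE (editorial, hedged): the mechanical change this patch makes is
// visible above. The old checks bound the call-site attribute group with a
// bare capture such as [[ATTR6:#.*]]; the regenerated checks either drop
// the match entirely (unmasked calls) or use the stricter #[[ATTR6:[0-9]+]]
// form, which matches the literal '#' and captures only the numeric group
// id. The first occurrence per prefix defines the binding; later lines
// reuse it as #[[ATTR6]] and must therefore see the same group id, e.g.:
//
// CHECK: call <4 x i32> @foo(...) #[[A:[0-9]+]]
// CHECK: call <4 x i32> @bar(...) #[[A]]
//
// (foo and bar are illustrative names, not part of this test.)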
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -387,12 +387,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -415,12 +415,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -471,12 +471,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -485,12 +485,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -499,12 +499,12 @@ // CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m( // 
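// NOTE (editorial, hedged): in the vfadd tests, _vv_ variants add two
// vectors while _vf_ variants take a scalar float/double second operand
// that is splat across the vector. The mangled intrinsic name encodes
// <result vector type>.<second operand type>.<XLEN type>, e.g.
// @llvm.riscv.vfadd.mask.nxv2f64.f64.i64 on RV64. A minimal unmasked
// sketch, assuming <riscv_vector.h> and the overloaded API:

#include <riscv_vector.h>

vfloat64m2_t add_bias(vfloat64m2_t v, double bias, size_t vl) {
  // The overload resolves to vfadd_vf_f64m2 from the operand types.
  return vfadd(v, bias, vl);
}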
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t 
vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -150,12 +150,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -165,12 +165,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -180,12 +180,12 @@ // 
CHECK-RV32-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -195,12 +195,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // 
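// NOTE (editorial, hedged): vfclass classifies each floating-point element
// into the same 10-bit category mask as scalar fclass (infinities, normals,
// subnormals, zeros, signaling/quiet NaN), so the result is an unsigned
// integer vector with the SEW/LMUL of the float source; that is why a
// vfloat64m4_t input yields vuint64m4_t here. Minimal sketch, assuming
// <riscv_vector.h> and the overloaded API:

#include <riscv_vector.h>

vuint64m4_t classify(vfloat64m4_t v, size_t vl) {
  // Each result element has exactly one class bit set.
  return vfclass(v, vl);
}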
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -240,12 +240,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -255,12 +255,12 @@ // CHECK-RV32-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( 
[[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { @@ -205,12 +205,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { @@ -387,12 +387,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i32( [[SRC:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { @@ -415,12 +415,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { @@ -471,12 +471,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { @@ -485,12 +485,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { @@ -499,12 +499,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { @@ -541,12 +541,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { @@ -555,12 +555,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { @@ -569,12 +569,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { @@ -583,12 +583,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { @@ -597,12 +597,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { @@ -611,12 +611,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { @@ -625,12 +625,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { @@ -653,12 +653,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { @@ -681,12 +681,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { @@ -695,12 +695,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { @@ -709,12 +709,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { @@ -723,12 +723,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { @@ -737,12 +737,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { @@ -751,12 +751,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { @@ -765,12 +765,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -780,12 +780,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -795,12 +795,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -810,12 +810,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -825,12 +825,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -840,12 +840,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -855,12 +855,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -870,12 +870,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -885,12 +885,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -900,12 +900,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -915,12 +915,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -930,12 +930,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, @@ -946,12 +946,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -961,12 +961,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -976,12 +976,12 @@ // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -991,12 +991,12 @@ // CHECK-RV32-LABEL: 
@test_vfcvt_rtz_xu_f_v_u32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -1006,12 +1006,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -1021,12 +1021,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -1036,12 +1036,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -1051,12 +1051,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -1066,12 +1066,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -1081,12 +1081,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1096,12 +1096,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1111,12 +1111,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1126,12 +1126,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -1141,12 +1141,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
@@ -1157,12 +1157,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1172,12 +1172,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1187,12 +1187,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1202,12 +1202,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -1217,12 +1217,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1232,12 +1232,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -1247,12 +1247,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1262,12 +1262,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -1277,12 +1277,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1292,12 +1292,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1307,12 +1307,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1322,12 +1322,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1337,12 +1337,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -1352,12 +1352,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -1367,12 +1367,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -1382,12 +1382,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -1397,12 +1397,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -1412,12 +1412,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -1427,12 +1427,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -1442,12 +1442,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -1457,12 +1457,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -1472,12 +1472,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -1487,12 +1487,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -1502,12 +1502,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -1517,12 +1517,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -1532,12 +1532,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -1547,12 +1547,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -1562,12 +1562,12 @@
 // CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -38,12 +38,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -53,12 +53,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -67,12 +67,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -82,12 +82,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -96,12 +96,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -111,12 +111,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -125,12 +125,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -140,12 +140,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -154,12 +154,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -169,12 +169,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -183,12 +183,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -198,12 +198,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -212,12 +212,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -227,12 +227,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -241,12 +241,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -256,12 +256,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -270,12 +270,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -286,12 +286,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -301,12 +301,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -317,12 +317,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -332,12 +332,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -348,12 +348,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -363,12 +363,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -379,12 +379,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -394,12 +394,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -410,12 +410,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -425,12 +425,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -441,12 +441,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -456,12 +456,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -472,12 +472,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -487,12 +487,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -503,12 +503,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -518,12 +518,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -534,12 +534,12 @@
 // CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
@@ -7,96 +7,96 @@
 // CHECK-RV32-LABEL: @test_vfirst_m_b1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret i32 [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst(op1, vl); }
 // CHECK-RV32-LABEL: @test_vfirst_m_b2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret i32 [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst(op1, vl); }
 // CHECK-RV32-LABEL: @test_vfirst_m_b4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret i32 [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret i64 [[TMP0]]
 //
 long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst(op1, vl); }
 // CHECK-RV32-LABEL: @test_vfirst_m_b8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret i32 [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfirst_m_b8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst(op1, vl); } // CHECK-RV32-LABEL: @test_vfirst_m_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b16(vbool16_t op1, size_t vl) { return vfirst(op1, vl); } // CHECK-RV32-LABEL: @test_vfirst_m_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b32(vbool32_t op1, size_t vl) { return vfirst(op1, vl); } // CHECK-RV32-LABEL: @test_vfirst_m_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b64(vbool64_t op1, size_t vl) { return vfirst(op1, vl); } // CHECK-RV32-LABEL: @test_vfirst_m_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv64i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv64i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vfirst_m_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv32i1.i32( [[OP1:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv32i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vfirst_m_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv16i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv16i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vfirst_m_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv8i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv8i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vfirst_m_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv4i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv4i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vfirst_m_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv2i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv2i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vfirst_m_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv1i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv1i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vfirst_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -310,12 +310,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -326,12 +326,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, 
float op1, @@ -341,12 +341,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -372,12 +372,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -403,12 +403,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -419,12 +419,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -434,12 +434,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -450,12 +450,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -465,12 +465,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -481,12 +481,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -496,12 +496,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -512,12 +512,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -543,12 +543,12 @@ // CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@@ -84,12 +84,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@@ -99,12 +99,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@@ -114,12 +114,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@@ -129,12 +129,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -144,12 +144,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@@ -159,12 +159,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -174,12 +174,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -189,12 +189,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -204,12 +204,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -219,12 +219,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@@ -234,12 +234,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
@@ -249,12 +249,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@@ -264,12 +264,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
@@ -279,12 +279,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -295,12 +295,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -310,12 +310,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -326,12 +326,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -341,12 +341,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -357,12 +357,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -372,12 +372,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -388,12 +388,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -403,12 +403,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -419,12 +419,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -434,12 +434,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -450,12 +450,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -465,12 +465,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -481,12 +481,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -496,12 +496,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -512,12 +512,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -527,12 +527,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -543,12 +543,12 @@
 // CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -38,12 +38,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -53,12 +53,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -67,12 +67,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -82,12 +82,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -96,12 +96,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
@@ -111,12 +111,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -125,12 +125,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
@@ -140,12 +140,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -154,12 +154,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -169,12 +169,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -183,12 +183,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -198,12 +198,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -212,12 +212,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -227,12 +227,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -241,12 +241,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
@@ -256,12 +256,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -270,12 +270,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -286,12 +286,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -301,12 +301,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -317,12 +317,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -332,12 +332,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -348,12 +348,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -363,12 +363,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -379,12 +379,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -394,12 +394,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -410,12 +410,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f32m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -425,12 +425,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -441,12 +441,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -456,12 +456,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -472,12 +472,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -487,12 +487,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -503,12 +503,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -518,12 +518,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vv_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -534,12 +534,12 @@
 // CHECK-RV32-LABEL: @test_vfmax_vf_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
@@ -39,12 +39,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
@@ -54,12 +54,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
@@ -69,12 +69,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
@@ -84,12 +84,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
@@ -99,12 +99,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
@@ -114,12 +114,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
@@ -129,12 +129,12 @@
 // CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -38,12 +38,12 @@
 // CHECK-RV32-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -53,12 +53,12 @@
 // CHECK-RV32-LABEL: @test_vfmin_vf_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 //
CHECK-RV64-LABEL: @test_vfmin_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f32.f32.i64( 
[[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -140,12 +140,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -154,12 +154,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m2( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -212,12 +212,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -256,12 +256,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -270,12 +270,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -286,12 +286,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -332,12 +332,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -348,12 +348,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -363,12 +363,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -379,12 +379,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -394,12 +394,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -410,12 +410,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -425,12 +425,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -456,12 +456,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -503,12 +503,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -518,12 +518,12 @@ // CHECK-RV32-LABEL: @test_vfmin_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // 
CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -534,12 +534,12 @@
 // CHECK-RV32-LABEL: @test_vfmin_vf_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
@@ -39,12 +39,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@@ -54,12 +54,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@@ -69,12 +69,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@@ -84,12 +84,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@@ -99,12 +99,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@@ -114,12 +114,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@@ -129,12 +129,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -144,12 +144,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@@ -159,12 +159,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -174,12 +174,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -189,12 +189,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -204,12 +204,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -219,12 +219,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4(
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -310,12 +310,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -326,12 +326,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -341,12 +341,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -372,12 +372,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -403,12 +403,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -419,12 +419,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -434,12 +434,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -450,12 +450,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -465,12 +465,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -481,12 +481,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -496,12 +496,12 @@ // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m( // CHECK-RV64-NEXT: 
entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -512,12 +512,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -527,12 +527,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -543,12 +543,12 @@
 // CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
@@ -39,12 +39,12 @@
 // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@@ -54,12 +54,12 @@
 // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
// vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t 
test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t 
test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1, @@ -249,12 
+249,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -310,12 +310,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -326,12 +326,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1, @@ -341,12 +341,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1, @@ -372,12 +372,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -403,12 +403,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -419,12 +419,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -434,12 +434,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -450,12 +450,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -465,12 +465,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -481,12 +481,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -496,12 +496,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -512,12 +512,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -543,12 +543,12 @@
 // CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV32-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -24,12 +24,12 @@
 // CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1,
float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -140,12 +140,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -154,12 +154,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -212,12 +212,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, 
vfloat64m4_t op2, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -256,12 +256,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -270,12 +270,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -286,12 +286,12 @@ // CHECK-RV32-LABEL: 
@test_vfmul_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -332,12 +332,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -348,12 +348,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -363,12 +363,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -379,12 +379,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -394,12 +394,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -410,12 +410,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -425,12 +425,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -456,12 +456,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -503,12 +503,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -518,12 +518,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vfmul_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c @@ -9,24 +9,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f32mf2_f32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32( [[SRC:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret float [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32( [[SRC:%.*]]) [[ATTR6:#.*]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) { @@ -35,24 +35,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f32m1_f32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret float [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) { @@ -61,24 +61,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f32m2_f32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret float [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) { @@ -87,24 +87,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f32m4_f32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret float [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) { @@ -113,24 +113,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f32m8_f32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret float [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i32( [[DST:%.*]], float [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) { @@ -139,24 +139,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f64m1_f64( 
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret double [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) { @@ -165,24 +165,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f64m2_f64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret double [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) { @@ -191,24 +191,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f64m4_f64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret double [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64( 
[[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) { @@ -217,24 +217,24 @@ // CHECK-RV32-LABEL: @test_vfmv_f_s_f64m8_f64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret double [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { return vfmv_f(src); } // CHECK-RV32-LABEL: @test_vfmv_s_f_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i32( [[DST:%.*]], double [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { @@ -23,12 +23,12 @@ // 
CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { @@ -205,12 +205,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { @@ -387,12 +387,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { @@ -415,12 +415,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { @@ -471,12 +471,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { @@ -485,12 +485,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { @@ -499,12 +499,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { @@ -541,12 +541,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { @@ -555,12 +555,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { @@ -569,12 +569,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i32( 
[[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { @@ -583,12 +583,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { @@ -597,12 +597,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { @@ -611,12 +611,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { @@ -625,12 +625,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( 
[[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { @@ -653,12 +653,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { @@ -681,12 +681,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { @@ -695,12 +695,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { @@ -709,12 +709,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { @@ -723,12 +723,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { @@ -737,12 +737,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -752,12 +752,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, @@ -768,12 +768,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -783,12 +783,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, @@ -799,12 +799,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -814,12 +814,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -829,12 +829,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -844,12 +844,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -874,12 +874,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -904,12 +904,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, @@ -920,12 +920,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -935,12 +935,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, @@ -951,12 +951,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -966,12 +966,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfncvt_rtz_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -997,12 +997,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1012,12 +1012,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1027,12 +1027,12 @@ // CHECK-RV32-LABEL: 
@test_vfncvt_rtz_xu_f_w_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1042,12 +1042,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, @@ -1073,12 +1073,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1088,12 +1088,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1103,12 +1103,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1118,12 +1118,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1133,12 +1133,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1148,12 +1148,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1163,12 +1163,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1178,12 +1178,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, @@ -1194,12 +1194,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1209,12 +1209,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1240,12 +1240,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, @@ -1256,12 +1256,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1271,12 +1271,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1286,12 +1286,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -1317,12 +1317,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -1332,12 +1332,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -1347,12 +1347,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, @@ -1363,12 +1363,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -1378,12 +1378,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -1408,12 +1408,12 @@ // CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2_m( 
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
@@ -1424,12 +1424,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
@@ -1440,12 +1440,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -1455,12 +1455,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
@@ -1471,12 +1471,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -1486,12 +1486,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
@@ -1502,12 +1502,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -1517,12 +1517,12 @@
// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@@ -24,12 +24,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
@@ -39,12 +39,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@@ -54,12 +54,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
@@ -69,12 +69,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@@ -84,12 +84,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
@@ -99,12 +99,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@@ -114,12 +114,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
@@ -129,12 +129,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -144,12 +144,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
@@ -159,12 +159,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -174,12 +174,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -204,12 +204,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -219,12 +219,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@@ -234,12 +234,12 @@
// CHECK-RV32-LABEL:
@test_vfnmacc_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -311,12 +311,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -327,12 +327,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -342,12 +342,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -358,12 +358,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -389,12 +389,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -404,12 +404,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -420,12 +420,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -435,12 +435,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -451,12 +451,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -466,12 +466,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -482,12 +482,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmacc.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -528,12 +528,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -544,12 +544,12 @@
// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
[[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -311,12 +311,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -327,12 +327,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -342,12 +342,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -358,12 +358,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -389,12 +389,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -404,12 +404,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -420,12 +420,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -435,12 +435,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -451,12 +451,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -466,12 +466,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -482,12 +482,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -544,12 +544,12 @@ // CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc, @@ -311,12 +311,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -327,12 +327,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, @@ -342,12 +342,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -358,12 +358,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc, @@ -389,12 +389,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1, @@ -404,12 +404,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc, @@ -420,12 +420,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1, @@ -435,12 +435,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -451,12 +451,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsac.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -466,12 +466,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -482,12 +482,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -544,12 +544,12 @@ // CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -144,12 +144,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
@@ -159,12 +159,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -174,12 +174,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -204,12 +204,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -219,12 +219,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@@ -234,12 +234,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
@@ -249,12 +249,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@@ -264,12 +264,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
@@ -279,12 +279,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -295,12 +295,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -311,12 +311,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -327,12 +327,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -342,12 +342,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -358,12 +358,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -373,12 +373,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -389,12 +389,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -404,12 +404,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -420,12 +420,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -435,12 +435,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -451,12 +451,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -466,12 +466,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -482,12 +482,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -497,12 +497,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -513,12 +513,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -528,12 +528,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -544,12 +544,12 @@
// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i32( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64( [[ACC:%.*]], double [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -23,12 +23,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
@@ -37,12 +37,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
@@ -51,12 +51,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
@@ -65,12 +65,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
@@ -79,12 +79,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
@@ -93,12 +93,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
@@ -107,12 +107,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
@@ -121,12 +121,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -135,12 +135,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -150,12 +150,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -165,12 +165,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -180,12 +180,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -195,12 +195,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -210,12 +210,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -225,12 +225,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -240,12 +240,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -255,12 +255,12 @@
// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -23,12 +23,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -37,12 +37,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -51,12 +51,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -65,12 +65,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -79,12 +79,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -93,12 +93,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -107,12 +107,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -121,12 +121,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -135,12 +135,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
@@ -150,12 +150,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
@@ -165,12 +165,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
@@ -180,12 +180,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -195,12 +195,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -210,12 +210,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -225,12 +225,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -240,12 +240,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -255,12 +255,12 @@
// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
@@ -25,12 +25,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
@@ -40,12 +40,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
@@ -55,12 +55,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
@@ -70,12 +70,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
@@ -85,12 +85,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
@@ -100,12 +100,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
@@ -115,12 +115,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
@@ -130,12 +130,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
@@ -145,12 +145,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
@@ -161,12 +161,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -177,12 +177,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
@@ -193,12 +193,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
@@ -209,12 +209,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
@@ -225,12 +225,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -241,12 +241,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
@@ -257,12 +257,12 @@
// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst, @@ -25,12 +25,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, @@ -40,12 +40,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, @@ -55,12 +55,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, @@ -70,12 +70,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, @@ -85,12 +85,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfredmin_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, @@ -100,12 +100,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, @@ -115,12 +115,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, @@ -130,12 +130,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, @@ -145,12 +145,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, @@ -193,12 +193,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, @@ -209,12 +209,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, @@ -257,12 +257,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst, @@ -25,12 +25,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfredsum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector, @@ -40,12 +40,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector, @@ -55,12 +55,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector, @@ -70,12 +70,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector, @@ -85,12 +85,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector, @@ -100,12 +100,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector, @@ -115,12 +115,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector, @@ -130,12 +130,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector, @@ -145,12 +145,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, @@ -193,12 +193,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, @@ -209,12 +209,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, @@ -257,12 
+257,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst, @@ -305,12 +305,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst, @@ -321,12 +321,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst, @@ -353,12 +353,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst, @@ -369,12 +369,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst, @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst, @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst, @@ -417,12 +417,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst, @@ -433,12 +433,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst, @@ -449,12 +449,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -465,12 +465,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst, @@ -481,12 +481,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst, @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst, @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -529,12 +529,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst, @@ -545,12 +545,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst, @@ -561,12 +561,12 @@ // CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( 
[[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsqrt7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -150,12 +150,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -165,12 +165,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -180,12 +180,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -195,12 +195,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -240,12 +240,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -255,12 +255,12 @@ // CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -150,12 +150,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -165,12 +165,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -180,12 +180,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -195,12 +195,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -240,12 +240,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -255,12 +255,12 @@ // CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -140,12 +140,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -154,12 +154,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -212,12 +212,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -256,12 +256,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -270,12 +270,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -285,12 +285,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -299,12 +299,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -314,12 +314,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -328,12 +328,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -372,12 +372,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -386,12 +386,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t 
op1, vfloat32m8_t op2, @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -415,12 +415,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i32( [[OP1:%.*]], double 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -473,12 +473,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -488,12 +488,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -531,12 +531,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -546,12 +546,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -560,12 +560,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -575,12 +575,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -589,12 +589,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -604,12 +604,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -618,12 +618,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -633,12 +633,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, 
size_t vl) { @@ -647,12 +647,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -662,12 +662,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -676,12 +676,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -691,12 +691,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -705,12 +705,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ -720,12 +720,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -734,12 +734,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -778,12 +778,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -792,12 +792,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -808,12 +808,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -823,12 +823,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -839,12 +839,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -854,12 +854,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -870,12 +870,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -885,12 +885,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -901,12 +901,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -916,12 +916,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -932,12 +932,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -947,12 +947,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -963,12 +963,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -978,12 +978,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -994,12 +994,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1009,12 +1009,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1025,12 +1025,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -1056,12 +1056,12 @@ // CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -1087,12 +1087,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -1103,12 +1103,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -1119,12 +1119,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -1134,12 +1134,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -1150,12 +1150,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -1165,12 +1165,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -1181,12 +1181,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -1196,12 +1196,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -1212,12 +1212,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -1258,12 +1258,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1274,12 +1274,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1289,12 +1289,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1305,12 +1305,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1320,12 +1320,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -1336,12 +1336,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -1367,12 +1367,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -1383,12 +1383,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -1399,12 +1399,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -1414,12 +1414,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -1430,12 +1430,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -1445,12 +1445,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -1461,12 +1461,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -1476,12 +1476,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -1492,12 +1492,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -1507,12 +1507,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -1523,12 +1523,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -1538,12 +1538,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1554,12 +1554,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1569,12 +1569,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1585,12 +1585,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1600,12 +1600,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -1616,12 +1616,12 @@ // CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfslide1down_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, @@ -178,12 +178,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, @@ -195,12 +195,12 @@ // CHECK-RV32-LABEL: 
@test_vfslide1down_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -211,12 +211,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, @@ -244,12 +244,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, @@ -278,12 +278,12 @@ // CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i32( [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i32( [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -193,12 +193,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -209,12 +209,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -257,12 +257,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: 
@test_vfsqrt_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -150,12 +150,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -165,12 +165,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -180,12 +180,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -195,12 +195,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -240,12 +240,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -255,12 +255,12 @@ // CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ 
-9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, @@ -140,12 +140,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { @@ -154,12 +154,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, @@ 
-198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { @@ -212,12 +212,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, @@ -256,12 +256,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { @@ -270,12 +270,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -286,12 +286,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, 
vfloat32m1_t maskedoff, @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -332,12 +332,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -348,12 +348,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -363,12 +363,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -379,12 +379,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -394,12 +394,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -410,12 +410,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -425,12 +425,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m1_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -456,12 +456,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -503,12 +503,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -518,12 +518,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vfsub_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: 
@test_vfwadd_wf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -140,12 +140,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -154,12 +154,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -212,12 +212,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -257,12 +257,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -272,12 +272,12 @@ // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t 
maskedoff,
@@ -288,12 +288,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
@@ -303,12 +303,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -319,12 +319,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -334,12 +334,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -350,12 +350,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -365,12 +365,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -381,12 +381,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -396,12 +396,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -412,12 +412,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -427,12 +427,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -443,12 +443,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -458,12 +458,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -474,12 +474,12 @@
 // CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
@@ -9,12 +9,12 @@
 // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i32( 
[[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { @@ -205,12 
+205,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { @@ -387,12 +387,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { @@ -415,12 +415,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { @@ -471,12 +471,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { @@ -485,12 +485,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { @@ -499,12 +499,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: 
@test_vfwcvt_f_f_v_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { @@ -541,12 +541,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, @@ -557,12 +557,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -572,12 +572,12 @@ // CHECK-RV32-LABEL: 
@test_vfwcvt_f_x_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -587,12 +587,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -602,12 +602,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -617,12 +617,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, @@ -633,12 +633,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -648,12 +648,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -663,12 +663,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -678,12 +678,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -708,12 +708,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -723,12 +723,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -738,12 +738,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -753,12 +753,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -768,12 +768,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -783,12 +783,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -798,12 +798,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -813,12 +813,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -828,12 +828,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, @@ -844,12 +844,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -890,12 +890,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, @@ -906,12 +906,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -921,12 +921,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -936,12 +936,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -951,12 +951,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m2_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -966,12 +966,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -981,12 +981,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -996,12 +996,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -1011,12 +1011,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1026,12 +1026,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1041,12 +1041,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -1056,12 +1056,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -1086,12 +1086,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -1101,12 +1101,12 @@ // CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfwmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -145,12 +145,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -160,12 +160,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t 
test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -176,12 +176,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -207,12 +207,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -222,12 +222,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -238,12 +238,12 @@ // CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -145,12 +145,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfwmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -160,12 +160,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -176,12 +176,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, 
@@ -207,12 +207,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -222,12 +222,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -238,12 +238,12 @@ // CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -141,12 +141,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -156,12 +156,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -145,12 +145,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -160,12 +160,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -176,12 +176,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -207,12 +207,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -222,12 +222,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -238,12 +238,12 @@ // CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t 
acc, vfloat32mf2_t op1, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -145,12 +145,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc, @@ -160,12 +160,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -176,12 +176,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc, @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -207,12 +207,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc, @@ -222,12 +222,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc, @@ -238,12 +238,12 @@ // CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i32( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64( [[ACC:%.*]], float [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst, @@ -25,12 +25,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst, @@ -41,12 +41,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst, @@ -57,12 +57,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst, @@ -73,12 +73,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst, @@ -89,12 +89,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, @@ -137,12 +137,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, @@ -153,12 +153,12 @@ // CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst, @@ -185,12 +185,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst, @@ -201,12 +201,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst, 
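// The vfwredosum hunks on either side of this point all regenerate the same
// way: the old FileCheck attribute capture `[[ATTR5]]` either disappears from
// unmasked calls or becomes the `#[[ATTR5]]` form on masked ones, while the
// semantics under test are unchanged. For reference, the ordered widening
// reduction these tests exercise accumulates every f32 source element,
// widened to f64, in element order on top of element 0 of the scalar operand,
// and writes the result to element 0 of the destination. A minimal scalar
// model of that behavior (an illustrative sketch in plain C, not the
// intrinsic API; the helper name and parameters are invented here, with `vl`
// standing in for the active vector length):
#include <stddef.h>

static double vfwredosum_ref(double scalar0, const float *vector, size_t vl) {
  double acc = scalar0;            // start from element 0 of the scalar operand
  for (size_t i = 0; i < vl; ++i)
    acc += (double)vector[i];      // widen f32 -> f64, accumulate in element order
  return acc;                      // written back to element 0 of the result
}

// Under a mask (the `_m` tests), only elements whose mask bit is set take part
// in the sum; the unordered vfwredsum variant checked earlier in this file may
// associate the additions in any order.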
@@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -265,12 +265,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst, @@ -281,12 +281,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vfwredosum_vs_f32m2_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst, @@ -297,12 +297,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst, @@ -313,12 +313,12 @@ // CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, 
vfloat32m1_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, @@ -111,12 +111,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { @@ -125,12 +125,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, @@ -140,12 +140,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { @@ -154,12 +154,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, @@ -169,12 +169,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, @@ -198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { @@ -212,12 +212,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, @@ -227,12 +227,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { @@ -241,12 +241,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -257,12 +257,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -272,12 +272,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -288,12 +288,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -319,12 +319,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -334,12 +334,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -350,12 +350,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -365,12 +365,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -381,12 +381,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -396,12 +396,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: 
@test_vfwsub_wf_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vid_v_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -22,12 +22,12 @@
// CHECK-RV32-LABEL: @test_vid_v_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -37,12 +37,12 @@
// CHECK-RV32-LABEL: @test_vid_v_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vid.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { @@ -66,12 +66,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { @@ -80,12 +80,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { @@ -94,12 +94,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t 
maskedoff, size_t vl) { @@ -108,12 +108,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -123,12 +123,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -153,12 +153,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -168,12 +168,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vid.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -183,12 +183,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -198,12 +198,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -213,12 +213,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -228,12 +228,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i32( 
[[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -243,12 +243,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -258,12 +258,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -288,12 +288,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -318,12 +318,12 @@ // CHECK-RV32-LABEL: @test_vid_v_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vid_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vid.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.viota.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.viota.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: 
@test_viota_m_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_viota_m_u64m4_m(vbool16_t 
mask, vuint64m4_t maskedoff, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_viota_m_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_viota_m_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.viota.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c @@ -10,13 +10,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { @@ -26,13 +26,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { @@ -42,13 +42,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { @@ -58,13 +58,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { @@ -74,13 +74,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { @@ -90,13 +90,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { @@ -106,13 +106,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { @@ -122,13 +122,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { @@ -138,13 +138,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t 
vl) { @@ -154,13 +154,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { @@ -170,13 +170,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { @@ -186,13 +186,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { @@ -202,13 +202,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { @@ -218,13 +218,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { @@ -234,13 +234,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { @@ -250,13 +250,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], 
* [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { @@ -266,13 +266,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { @@ -282,13 +282,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { @@ -298,13 +298,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { @@ -314,13 +314,13 @@ // 
CHECK-RV32-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { @@ -330,13 +330,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { @@ -346,13 +346,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { @@ -362,13 +362,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { @@ -378,13 +378,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { @@ -394,13 +394,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { @@ -410,13 +410,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { @@ -426,13 +426,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { @@ -442,13 +442,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { @@ -458,13 +458,13 @@ // CHECK-RV32-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { @@ -474,13 +474,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { @@ -490,13 +490,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { @@ -506,13 +506,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { @@ -522,13 +522,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { @@ -538,13 +538,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { @@ -554,13 +554,13 @@ // CHECK-RV32-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { @@ -570,13 +570,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { @@ -586,13 +586,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { @@ -602,13 +602,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { @@ -618,13 +618,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { @@ -634,13 +634,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { @@ -650,13 +650,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { @@ -666,13 +666,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { @@ -682,13 +682,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { @@ -698,13 +698,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { @@ -714,13 +714,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { @@ -730,13 +730,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { @@ -746,13 +746,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { @@ -762,13 +762,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { @@ -778,13 +778,13 @@ // CHECK-RV32-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { @@ -794,13 +794,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { @@ -810,13 +810,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { @@ -826,13 +826,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { @@ -842,13 +842,13 @@ // CHECK-RV32-LABEL: @test_vle64_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c @@ -10,13 +10,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -26,13 +26,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -42,13 +42,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -58,13 +58,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -74,13 +74,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -90,13 +90,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -106,13 +106,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -122,13 +122,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -138,13 +138,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -154,13 +154,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -170,13 +170,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -186,13 +186,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -202,13 +202,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -218,13 +218,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -234,13 +234,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -250,13 +250,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -266,13 +266,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -282,13 +282,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -298,13 +298,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -314,13 +314,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -330,13 +330,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -346,13 +346,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -362,13 +362,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -378,13 +378,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -394,13 +394,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -410,13 +410,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -426,13 +426,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -442,13 +442,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -458,13 +458,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -474,13 +474,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -490,13 +490,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -506,13 +506,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -522,13 +522,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -538,13 +538,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -554,13 +554,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -570,13 +570,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -586,13 +586,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -602,13 +602,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -618,13 +618,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -634,13 +634,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -650,13 +650,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -666,13 +666,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -682,13 +682,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -698,13 +698,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -714,13 +714,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -730,13 +730,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -746,13 +746,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -762,13 +762,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -778,13 +778,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -794,13 +794,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -810,13 +810,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(* [[TMP0]], 
[[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -826,13 +826,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -842,13 +842,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -858,13 +858,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -874,13 +874,13 @@ // 
CHECK-RV32-LABEL: @test_vloxei32_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -890,13 +890,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -906,13 +906,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -922,13 +922,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -938,13 +938,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -954,13 +954,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -970,13 +970,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -986,13 +986,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1002,13 +1002,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1018,13 +1018,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1034,13 +1034,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1050,13 +1050,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1066,13 +1066,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1082,13 +1082,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1098,13 +1098,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1114,13 +1114,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1130,13 +1130,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1146,13 +1146,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1162,13 +1162,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1178,13 +1178,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1194,13 +1194,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1210,13 +1210,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1226,13 +1226,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1242,13 +1242,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1258,13 +1258,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8mf8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1274,13 +1274,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1290,13 +1290,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1306,13 +1306,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1322,13 +1322,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1338,13 +1338,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
@@ -1354,13 +1354,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u8m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
@@ -1370,13 +1370,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u8mf8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1386,13 +1386,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1402,13 +1402,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1418,13 +1418,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1434,13 +1434,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u8m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1450,13 +1450,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u8m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -1466,13 +1466,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u8mf8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1482,13 +1482,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1498,13 +1498,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1514,13 +1514,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1530,13 +1530,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u8m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1546,13 +1546,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u8mf8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1562,13 +1562,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1578,13 +1578,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1594,13 +1594,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1610,13 +1610,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1626,13 +1626,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1642,13 +1642,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1658,13 +1658,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1674,13 +1674,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u16m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -1690,13 +1690,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u16m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -1706,13 +1706,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1722,13 +1722,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1738,13 +1738,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1754,13 +1754,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1770,13 +1770,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u16m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -1786,13 +1786,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u16m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -1802,13 +1802,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1818,13 +1818,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1834,13 +1834,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1850,13 +1850,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1866,13 +1866,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u16m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -1882,13 +1882,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1898,13 +1898,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1914,13 +1914,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -1931,13 +1931,13 @@
 // CHECK-RV32-LABEL: @test_vloxei64_v_u16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1947,13 +1947,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1963,13 +1963,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1979,13 +1979,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1995,13 +1995,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -2011,13 +2011,13 @@
 // CHECK-RV32-LABEL: @test_vloxei8_v_u32m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -2027,13 +2027,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -2043,13 +2043,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -2059,13 +2059,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -2075,13 +2075,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -2091,13 +2091,13 @@
 // CHECK-RV32-LABEL: @test_vloxei16_v_u32m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -2107,13 +2107,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -2123,13 +2123,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -2139,13 +2139,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -2155,13 +2155,13 @@
 // CHECK-RV32-LABEL: @test_vloxei32_v_u32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-//
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -2171,13 +2171,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -2187,13 +2187,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -2203,13 +2203,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -2219,13 +2219,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -2235,13 +2235,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -2251,13 +2251,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2267,13 +2267,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2283,13 +2283,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2299,13 +2299,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -2315,13 +2315,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2331,13 +2331,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m2( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2347,13 +2347,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -2363,13 +2363,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -2379,13 +2379,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -2395,13 +2395,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -2411,13 +2411,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -2427,13 +2427,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -2443,13 +2443,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -2459,13 +2459,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -2475,13 +2475,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -2491,13 +2491,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t 
bindex, size_t vl) { @@ -2507,13 +2507,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -2523,13 +2523,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -2539,13 +2539,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -2555,13 +2555,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { @@ -2571,13 +2571,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { @@ -2587,13 +2587,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -2603,13 +2603,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -2619,13 +2619,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -2635,13 +2635,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { @@ -2651,13 +2651,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { @@ -2667,13 +2667,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -2683,13 +2683,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { @@ -2699,13 +2699,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -2715,13 +2715,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { @@ -2731,13 +2731,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i32(* [[TMP0]], 
[[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { @@ -2747,13 +2747,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -2763,13 +2763,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { @@ -2779,13 +2779,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -2795,13 +2795,13 
@@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { @@ -2811,13 +2811,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -2827,13 +2827,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -2843,13 +2843,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -2859,13 +2859,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { @@ -2875,13 +2875,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -2891,13 +2891,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -2907,13 +2907,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { @@ -2923,13 +2923,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { @@ -2939,13 +2939,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -2955,13 +2955,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -2971,13 +2971,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { @@ -2987,13 +2987,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { @@ -3003,13 +3003,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { @@ -3019,13 +3019,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -3035,13 +3035,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { @@ -3051,13 +3051,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { @@ -3067,13 +3067,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t 
mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3083,13 +3083,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3099,13 +3099,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3115,13 +3115,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, 
vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -3131,13 +3131,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -3147,13 +3147,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -3163,13 +3163,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, 
vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -3179,13 +3179,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3195,13 +3195,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3211,13 +3211,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t 
test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -3227,13 +3227,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -3243,13 +3243,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -3259,13 +3259,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret 
[[TMP1]] // vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -3275,13 +3275,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3291,13 +3291,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -3307,13 +3307,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -3323,13 +3323,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -3339,13 +3339,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -3355,13 +3355,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -3371,13 +3371,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -3387,13 +3387,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -3403,13 +3403,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -3419,13 +3419,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3435,13 +3435,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3451,13 +3451,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3467,13 +3467,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -3483,13 +3483,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -3499,13 +3499,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -3515,13 +3515,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3531,13 +3531,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3547,13 +3547,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -3563,13 +3563,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -3579,13 +3579,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -3595,13 +3595,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -3611,13 +3611,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3627,13 +3627,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -3643,13 +3643,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -3659,13 +3659,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -3675,13 +3675,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -3691,13 +3691,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -3707,13 +3707,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -3723,13 +3723,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -3739,13 +3739,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -3755,13 +3755,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3771,13 +3771,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3787,13 +3787,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3803,13 +3803,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -3819,13 +3819,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -3835,13 +3835,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3851,13 +3851,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3867,13 +3867,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -3883,13 +3883,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vloxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -3899,13 +3899,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -3915,13 +3915,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3931,13 +3931,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -3947,13 +3947,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -3963,13 +3963,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -3979,13 +3979,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -3995,13 +3995,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -4011,13 +4011,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -4027,13 +4027,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -4043,13 +4043,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -4059,13 +4059,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -4075,13 +4075,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -4091,13 +4091,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -4107,13 +4107,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -4123,13 +4123,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -4139,13 +4139,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -4155,13 +4155,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -4171,13 +4171,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -4187,13 +4187,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -4203,13 +4203,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -4219,13 +4219,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -4235,13 +4235,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -4251,13 +4251,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -4267,13 +4267,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -4283,13 +4283,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -4299,13 +4299,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -4315,13 +4315,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -4331,13 +4331,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -4347,13 +4347,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -4363,13 +4363,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * 
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -4379,13 +4379,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -4395,13 +4395,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -4411,13 +4411,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * 
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -4427,13 +4427,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -4443,13 +4443,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -4459,13 +4459,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast 
i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -4475,13 +4475,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -4491,13 +4491,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -4507,13 +4507,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -4523,13 +4523,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -4539,13 +4539,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -4555,13 +4555,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u8mf2_m( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -4571,13 +4571,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -4587,13 +4587,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -4603,13 +4603,13 @@ // CHECK-RV32-LABEL: 
@test_vloxei64_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -4619,13 +4619,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -4635,13 +4635,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -4651,13 +4651,13 
@@ // CHECK-RV32-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -4667,13 +4667,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -4683,13 +4683,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, 
size_t vl) { @@ -4699,13 +4699,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -4715,13 +4715,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -4731,13 +4731,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const 
uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -4747,13 +4747,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -4763,13 +4763,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -4779,13 +4779,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t 
test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -4795,13 +4795,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -4811,13 +4811,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -4827,13 +4827,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -4843,13 +4843,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -4859,13 +4859,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -4875,13 +4875,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -4891,13 +4891,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -4907,13 +4907,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -4923,13 +4923,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -4939,13 +4939,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -4955,13 +4955,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -4971,13 +4971,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -4987,13 +4987,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -5003,13 +5003,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -5019,13 +5019,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -5035,13 +5035,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -5051,13 +5051,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -5067,13 +5067,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -5083,13 +5083,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -5099,13 +5099,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -5115,13 +5115,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -5131,13 +5131,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -5147,13 +5147,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -5163,13 +5163,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -5179,13 +5179,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -5195,13 +5195,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -5211,13 +5211,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -5227,13 +5227,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -5243,13 +5243,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -5259,13 +5259,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -5275,13 +5275,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -5291,13 +5291,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -5307,13 +5307,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -5323,13 +5323,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -5339,13 +5339,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -5355,13 +5355,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -5371,13 +5371,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -5387,13 +5387,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -5403,13 +5403,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -5419,13 +5419,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -5435,13 +5435,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -5451,13 +5451,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -5467,13 +5467,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -5483,13 +5483,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -5499,13 +5499,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -5515,13 +5515,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -5531,13 +5531,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -5547,13 +5547,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] 
to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -5563,13 +5563,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -5579,13 +5579,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -5595,13 +5595,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -5611,13 +5611,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { @@ -5627,13 +5627,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { @@ -5643,13 +5643,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32mf2_m( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -5659,13 +5659,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -5675,13 +5675,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
@@ -5691,13 +5691,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { @@ -5707,13 +5707,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { @@ -5723,13 +5723,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t
maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -5739,13 +5739,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { @@ -5755,13 +5755,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { @@ -5771,13 +5771,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { @@ -5787,13 +5787,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { @@ -5803,13 +5803,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { @@ -5819,13 +5819,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { @@ -5835,13 +5835,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { @@ -5851,13 +5851,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { @@ -5867,13 +5867,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -5883,13 +5883,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -5899,13 +5899,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -5915,13 +5915,13 @@ // CHECK-RV32-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { @@ -5931,13 +5931,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -5947,13 +5947,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -5963,13 +5963,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { @@ -5979,13 +5979,13 @@ // CHECK-RV32-LABEL: @test_vloxei16_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { @@ -5995,13 +5995,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -6011,13 +6011,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { @@ -6027,13 +6027,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { @@ -6043,13 +6043,13 @@ // CHECK-RV32-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { @@ -6059,13 +6059,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { @@ -6075,13 +6075,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { @@ -6091,13 +6091,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { @@ -6107,13 +6107,13 @@ // CHECK-RV32-LABEL: @test_vloxei64_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vloxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
@@ -10,13 +10,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -28,13 +28,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -46,13 +46,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]],
i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -64,13 +64,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -82,13 +82,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -100,13 +100,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -118,13 +118,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -136,13 +136,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -154,13 +154,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -172,13 +172,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -190,13 +190,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -208,13 +208,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t 
test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -226,13 +226,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -244,13 +244,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -262,13 +262,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -280,13 +280,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -298,13 +298,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -316,13 +316,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -334,13 +334,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -352,13 +352,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -370,13 +370,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -388,13 +388,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vlse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -406,13 +406,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -424,13 +424,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -442,13 +442,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -460,13 +460,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -478,13 +478,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -496,13 +496,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -514,13 +514,13 @@ // CHECK-RV32-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -532,13 +532,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -550,13 +550,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -568,13 +568,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV32-NEXT: entry: // 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -586,13 +586,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -604,13 +604,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -622,13 +622,13 @@ // CHECK-RV32-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -640,13 +640,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -658,13 +658,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -676,13 +676,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -694,13 +694,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -712,13 +712,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -730,13 +730,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -748,13 +748,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -766,13 +766,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -784,13 +784,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -802,13 +802,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -820,13 +820,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -838,13 +838,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t 
test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -856,13 +856,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -874,13 +874,13 @@ // CHECK-RV32-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -892,13 +892,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -910,13 +910,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -928,13 +928,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -946,13 +946,13 @@ // CHECK-RV32-LABEL: @test_vlse64_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], i32 [[BSTRIDE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c @@ -10,13 +10,13 @@ // 
CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -26,13 +26,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -42,13 +42,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -58,13 +58,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -74,13 +74,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -90,13 +90,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -106,13 +106,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -122,13 +122,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 
[[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -138,13 +138,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -154,13 +154,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
@@ -170,13 +170,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
@@ -186,13 +186,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i8m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
@@ -202,13 +202,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i8m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
@@ -218,13 +218,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -234,13 +234,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
@@ -250,13 +250,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
@@ -266,13 +266,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
@@ -282,13 +282,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i8m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
@@ -298,13 +298,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
@@ -314,13 +314,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
@@ -330,13 +330,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
@@ -346,13 +346,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i8m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8m1_t test_vluxei64_v_i8m1(const int64_t *base, vuint64m8_t bindex, size_t vl) {
@@ -362,13 +362,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -378,13 +378,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -394,13 +394,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -410,13 +410,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
@@ -426,13 +426,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i16m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
@@ -442,13 +442,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i16m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
@@ -458,13 +458,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -474,13 +474,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -490,13 +490,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
@@ -506,13 +506,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
@@ -522,13 +522,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i16m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
@@ -538,13 +538,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i16m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
@@ -554,13 +554,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -570,13 +570,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
@@ -586,13 +586,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
@@ -602,13 +602,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
@@ -618,13 +618,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i16m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
@@ -634,13 +634,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
@@ -650,13 +650,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
@@ -666,13 +666,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i16m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
@@ -682,13 +682,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i16m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
@@ -698,13 +698,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -714,13 +714,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -730,13 +730,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -746,13 +746,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
@@ -762,13 +762,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i32m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
@@ -778,13 +778,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -794,13 +794,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -810,13 +810,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
@@ -826,13 +826,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
@@ -842,13 +842,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i32m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
@@ -858,13 +858,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -874,13 +874,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
@@ -890,13 +890,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
@@ -906,13 +906,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
@@ -922,13 +922,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i32m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
@@ -938,13 +938,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i32mf2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
@@ -954,13 +954,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i32m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
@@ -970,13 +970,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i32m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
@@ -986,13 +986,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i32m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
@@ -1002,13 +1002,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
@@ -1018,13 +1018,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
@@ -1034,13 +1034,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
@@ -1050,13 +1050,13 @@
 // CHECK-RV32-LABEL: @test_vluxei8_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
@@ -1066,13 +1066,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
@@ -1082,13 +1082,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
@@ -1098,13 +1098,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
@@ -1114,13 +1114,13 @@
 // CHECK-RV32-LABEL: @test_vluxei16_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
@@ -1130,13 +1130,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
@@ -1146,13 +1146,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
@@ -1162,13 +1162,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
@@ -1178,13 +1178,13 @@
 // CHECK-RV32-LABEL: @test_vluxei32_v_i64m8(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
@@ -1194,13 +1194,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i64m1(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
@@ -1210,13 +1210,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i64m2(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
@@ -1226,13 +1226,13 @@
 // CHECK-RV32-LABEL: @test_vluxei64_v_i64m4(
 // CHECK-RV32-NEXT: entry:
 // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP1]]
 //
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]],
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -1242,13 +1242,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -1258,13 +1258,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1274,13 +1274,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1290,13 +1290,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(* 
[[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1306,13 +1306,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -1322,13 +1322,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -1338,13 +1338,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -1354,13 +1354,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m8( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -1370,13 +1370,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1386,13 +1386,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1402,13 +1402,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -1418,13 +1418,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -1434,13 +1434,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -1450,13 +1450,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -1466,13 +1466,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(* 
[[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1482,13 +1482,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -1498,13 +1498,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -1514,13 +1514,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -1530,13 +1530,13 @@ // CHECK-RV32-LABEL: 
@test_vluxei32_v_u8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -1546,13 +1546,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -1562,13 +1562,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -1578,13 +1578,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -1594,13 +1594,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -1610,13 +1610,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1626,13 +1626,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1642,13 +1642,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1658,13 +1658,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -1674,13 +1674,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -1690,13 +1690,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t 
vl) { @@ -1706,13 +1706,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -1722,13 +1722,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -1738,13 +1738,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -1754,13 +1754,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -1770,13 +1770,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -1786,13 +1786,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -1802,13 +1802,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -1818,13 +1818,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -1834,13 +1834,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -1850,13 +1850,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -1866,13 +1866,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -1882,13 +1882,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -1898,13 +1898,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -1914,13 +1914,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -1930,13 +1930,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -1946,13 +1946,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -1962,13 +1962,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -1978,13 +1978,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -1994,13 +1994,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m4( // CHECK-RV32-NEXT: entry: // 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -2010,13 +2010,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -2026,13 +2026,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2042,13 +2042,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2058,13 +2058,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -2074,13 +2074,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -2090,13 +2090,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -2106,13 +2106,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -2122,13 +2122,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -2138,13 +2138,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -2154,13 +2154,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t 
bindex, size_t vl) { @@ -2170,13 +2170,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -2186,13 +2186,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -2202,13 +2202,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -2218,13 +2218,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -2234,13 +2234,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -2250,13 +2250,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -2266,13 +2266,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -2282,13 +2282,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(* 
[[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -2298,13 +2298,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -2314,13 +2314,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -2330,13 +2330,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -2346,13 +2346,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { @@ -2362,13 +2362,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -2378,13 +2378,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -2394,13 +2394,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -2410,13 +2410,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -2426,13 +2426,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -2442,13 +2442,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -2458,13 +2458,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -2474,13 +2474,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -2490,13 +2490,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -2506,13 +2506,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { @@ -2522,13 +2522,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { @@ -2538,13 +2538,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { @@ -2554,13 +2554,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { @@ -2570,13 +2570,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { @@ -2586,13 +2586,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { @@ -2602,13 +2602,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { @@ -2618,13 +2618,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { @@ -2634,13 +2634,13 @@ // CHECK-RV32-LABEL: 
@test_vluxei16_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { @@ -2650,13 +2650,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { @@ -2666,13 +2666,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { @@ -2682,13 +2682,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { @@ -2698,13 +2698,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { @@ -2714,13 +2714,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { @@ -2730,13 +2730,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { @@ -2746,13 +2746,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i32(* [[TMP0]], 
[[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { @@ -2762,13 +2762,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { @@ -2778,13 +2778,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { @@ -2794,13 +2794,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { @@ -2810,13 +2810,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { @@ -2826,13 +2826,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { @@ -2842,13 +2842,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { @@ -2858,13 +2858,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { @@ -2874,13 +2874,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { @@ -2890,13 +2890,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { @@ -2906,13 +2906,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { @@ -2922,13 +2922,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = 
bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { @@ -2938,13 +2938,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { @@ -2954,13 +2954,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { @@ -2970,13 +2970,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { @@ -2986,13 +2986,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { @@ -3002,13 +3002,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { @@ -3018,13 +3018,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { @@ -3034,13 +3034,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv4f64.nxv4i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { @@ -3050,13 +3050,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i32(* [[TMP0]], [[BINDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { @@ -3066,13 +3066,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3082,13 +3082,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3098,13 +3098,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3114,13 +3114,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { @@ -3130,13 +3130,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { @@ -3146,13 +3146,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { @@ -3162,13 +3162,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { @@ -3178,13 +3178,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3194,13 +3194,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3210,13 +3210,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { @@ -3226,13 +3226,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { @@ -3242,13 +3242,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { @@ -3258,13 +3258,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { @@ -3274,13 +3274,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3290,13 +3290,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { @@ -3306,13 +3306,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { @@ -3322,13 +3322,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { @@ -3338,13 +3338,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { @@ -3354,13 +3354,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { @@ -3370,13 +3370,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { @@ -3386,13 +3386,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { @@ -3402,13 +3402,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { @@ -3418,13 +3418,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3434,13 +3434,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3450,13 +3450,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3466,13 +3466,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { @@ -3482,13 +3482,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { @@ -3498,13 +3498,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { @@ -3514,13 +3514,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: 
@test_vluxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3530,13 +3530,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3546,13 +3546,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { @@ -3562,13 +3562,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { @@ -3578,13 +3578,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { @@ -3594,13 +3594,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { @@ -3610,13 +3610,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3626,13 +3626,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { @@ -3642,13 +3642,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { @@ -3658,13 +3658,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { @@ -3674,13 +3674,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { @@ -3690,13 +3690,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { @@ -3706,13 +3706,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { @@ -3722,13 +3722,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { @@ -3738,13 +3738,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { @@ -3754,13 +3754,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -3770,13 +3770,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -3786,13 +3786,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -3802,13 +3802,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { @@ -3818,13 +3818,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { @@ -3834,13 +3834,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -3850,13 +3850,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -3866,13 +3866,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { @@ -3882,13 +3882,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { @@ -3898,13 +3898,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { @@ -3914,13 +3914,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -3930,13 +3930,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { @@ -3946,13 +3946,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { @@ -3962,13 +3962,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { @@ -3978,13 +3978,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { @@ -3994,13 +3994,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i32mf2_m( // 
CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { @@ -4010,13 +4010,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { @@ -4026,13 +4026,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { @@ -4042,13 +4042,13 @@ // 
CHECK-RV32-LABEL: @test_vluxei64_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { @@ -4058,13 +4058,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -4074,13 +4074,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { @@ 
-4090,13 +4090,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -4106,13 +4106,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { @@ -4122,13 +4122,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t 
bindex, size_t vl) { @@ -4138,13 +4138,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -4154,13 +4154,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { @@ -4170,13 +4170,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t 
maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { @@ -4186,13 +4186,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -4202,13 +4202,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { @@ -4218,13 +4218,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t 
test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { @@ -4234,13 +4234,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { @@ -4250,13 +4250,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { @@ -4266,13 +4266,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { @@ -4282,13 +4282,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { @@ -4298,13 +4298,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { @@ -4314,13 +4314,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { @@ -4330,13 +4330,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { @@ -4346,13 +4346,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { @@ -4362,13 +4362,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { @@ -4378,13 +4378,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { @@ -4394,13 +4394,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { @@ -4410,13 +4410,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { @@ -4426,13 +4426,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { @@ -4442,13 +4442,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { @@ -4458,13 +4458,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { @@ -4474,13 +4474,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { @@ -4490,13 +4490,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { @@ -4506,13 +4506,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { @@ -4522,13 +4522,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { @@ -4538,13 +4538,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { @@ -4554,13 +4554,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { @@ -4570,13 +4570,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { @@ -4586,13 +4586,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { @@ -4602,13 +4602,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { @@ -4618,13 +4618,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { @@ -4634,13 +4634,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { @@ -4650,13 +4650,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { @@ -4666,13 +4666,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { @@ -4682,13 +4682,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { @@ -4698,13 +4698,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { @@ -4714,13 +4714,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { @@ -4730,13 +4730,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { @@ -4746,13 +4746,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { @@ -4762,13 +4762,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { @@ -4778,13 +4778,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { @@ -4794,13 +4794,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { @@ -4810,13 +4810,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { @@ -4826,13 +4826,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { @@ -4842,13 +4842,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { @@ -4858,13 +4858,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { @@ -4874,13 +4874,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { @@ -4890,13 +4890,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { @@ -4906,13 +4906,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { @@ -4922,13 +4922,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { @@ -4938,13 +4938,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { @@ -4954,13 +4954,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { @@ -4970,13 +4970,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { @@ -4986,13 +4986,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { @@ -5002,13 +5002,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { @@ -5018,13 +5018,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { @@ -5034,13 +5034,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { @@ -5050,13 +5050,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { @@ -5066,13 +5066,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { @@ -5082,13 +5082,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { @@ -5098,13 +5098,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { @@ -5114,13 +5114,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { @@ -5130,13 +5130,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { @@ -5146,13 +5146,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { @@ -5162,13 +5162,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { @@ -5178,13 +5178,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { @@ -5194,13 +5194,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { @@ -5210,13 +5210,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { @@ -5226,13 +5226,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] 
to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { @@ -5242,13 +5242,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { @@ -5258,13 +5258,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { @@ -5274,13 +5274,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32m2_m( // CHECK-RV32-NEXT: entry: // 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { @@ -5290,13 +5290,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { @@ -5306,13 +5306,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { @@ -5322,13 +5322,13 @@ // CHECK-RV32-LABEL: 
@test_vluxei8_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { @@ -5338,13 +5338,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { @@ -5354,13 +5354,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { @@ -5370,13 +5370,13 
@@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { @@ -5386,13 +5386,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { @@ -5402,13 +5402,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, 
vuint16m1_t bindex, size_t vl) { @@ -5418,13 +5418,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { @@ -5434,13 +5434,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { @@ -5450,13 +5450,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, 
vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { @@ -5466,13 +5466,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { @@ -5482,13 +5482,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { @@ -5498,13 +5498,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // 
vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { @@ -5514,13 +5514,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { @@ -5530,13 +5530,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { @@ -5546,13 +5546,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { @@ -5562,13 +5562,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { @@ -5578,13 +5578,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { @@ -5594,13 +5594,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { @@ -5610,13 +5610,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { @@ -5626,13 +5626,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { @@ -5642,13 +5642,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { @@ -5658,13 +5658,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { @@ -5674,13 +5674,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { @@ -5690,13 +5690,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { @@ -5706,13 +5706,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { @@ -5722,13 +5722,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { @@ -5738,13 +5738,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { @@ -5754,13 +5754,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { @@ -5770,13 +5770,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { @@ -5786,13 +5786,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { @@ -5802,13 +5802,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { @@ -5818,13 +5818,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { @@ -5834,13 +5834,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { @@ -5850,13 +5850,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { @@ -5866,13 +5866,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { @@ -5882,13 +5882,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // 
CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { @@ -5898,13 +5898,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { @@ -5914,13 +5914,13 @@ // CHECK-RV32-LABEL: @test_vluxei8_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { @@ -5930,13 +5930,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { @@ -5946,13 +5946,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { @@ -5962,13 +5962,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { @@ -5978,13 +5978,13 @@ // CHECK-RV32-LABEL: @test_vluxei16_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i32( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { @@ -5994,13 +5994,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { @@ -6010,13 +6010,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { @@ -6026,13 +6026,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { @@ -6042,13 +6042,13 @@ // CHECK-RV32-LABEL: @test_vluxei32_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { @@ -6058,13 +6058,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { @@ -6074,13 +6074,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) { @@ -6090,13 +6090,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) { @@ -6106,13 +6106,13 @@ // CHECK-RV32-LABEL: @test_vluxei64_v_f64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP1]] // // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { 
@@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m2( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -203,12 +203,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -217,12 +217,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,12 +231,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -245,12 +245,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -259,12 +259,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -273,12 +273,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -287,12 +287,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -301,12 +301,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -315,12 +315,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -329,12 +329,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -343,12 +343,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -357,12 +357,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -371,12 +371,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -385,12 +385,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -399,12 +399,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -413,12 +413,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -427,12 +427,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -441,12 +441,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -455,12 +455,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -469,12 +469,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -483,12 +483,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -497,12 +497,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -511,12 +511,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -525,12 +525,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -539,12 +539,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -553,12 +553,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -567,12 +567,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -581,12 +581,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -595,12 +595,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -609,12 +609,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -623,12 +623,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -637,12 +637,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -651,12 +651,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -665,12 +665,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -679,12 +679,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -693,12 +693,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -707,12 +707,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -721,12 +721,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -735,12 +735,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -749,12 +749,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -763,12 +763,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -777,12 +777,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -791,12 +791,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -805,12 +805,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -819,12 +819,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -833,12 +833,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -847,12 +847,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -861,12 +861,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -875,12 +875,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -889,12 +889,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -903,12 +903,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -917,12 +917,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -931,12 +931,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -945,12 +945,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -959,12 +959,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -973,12 +973,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -987,12 +987,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1001,12 +1001,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1015,12 +1015,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1029,12 +1029,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1043,12 +1043,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1057,12 +1057,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1071,12 +1071,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,12 +1085,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1099,12 +1099,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1113,12 +1113,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1127,12 +1127,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1141,12 +1141,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1155,12 +1155,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1169,12 +1169,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1183,12 +1183,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1197,12 +1197,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1211,12 +1211,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1225,12 +1225,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
@@ -1239,12 +1239,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1253,12 +1253,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1267,12 +1267,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -1281,12 +1281,12 @@
// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, 
vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, 
size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: 
@test_vmacc_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmacc.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -65,12 +65,12 @@ // 
CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, @@ -80,12 +80,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, @@ -95,12 +95,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -109,12 +109,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -123,12 +123,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, @@ -153,12 +153,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -167,12 +167,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], 
[[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin, @@ -196,12 +196,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin, @@ -211,12 +211,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { @@ -239,12 +239,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin, @@ -254,12 +254,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin, @@ -269,12 +269,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -283,12 +283,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -297,12 +297,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], 
[[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin, @@ -312,12 +312,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin, @@ -327,12 +327,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -341,12 +341,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -355,12 +355,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin, @@ -370,12 +370,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin, @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -428,12 +428,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -531,12 +531,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, @@ -546,12 +546,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, @@ -561,12 +561,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -575,12 +575,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -589,12 +589,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64( 
[[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, @@ -604,12 +604,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin, @@ -619,12 +619,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -633,12 +633,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -647,12 +647,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, @@ -662,12 +662,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin, @@ -677,12 +677,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -691,12 +691,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -705,12 +705,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, @@ -720,12 +720,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin, @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -778,12 +778,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, @@ -793,12 +793,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -808,12 +808,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -822,12 +822,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, @@ -837,12 +837,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, @@ -852,12 +852,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -866,12 +866,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -880,12 +880,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, @@ -895,12 +895,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, @@ -910,12 +910,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -924,12 +924,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -938,12 +938,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, @@ -953,12 +953,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin, @@ -968,12 +968,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -996,12 +996,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, 
vint32m8_t op2, @@ -1011,12 +1011,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin, @@ -1026,12 +1026,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1054,12 +1054,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, @@ 
-1069,12 +1069,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, @@ -1084,12 +1084,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1098,12 +1098,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -1112,12 +1112,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: 
@test_vmadc_vxm_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, @@ -1142,12 +1142,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1156,12 +1156,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -1170,12 +1170,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, @@ -1185,12 +1185,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i64m4_b16( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -1228,12 +1228,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin, @@ -1258,12 +1258,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1272,12 +1272,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -1286,12 +1286,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, @@ -1301,12 +1301,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, @@ -1316,12 +1316,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1330,12 +1330,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1344,12 +1344,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, @@ -1359,12 +1359,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, @@ -1374,12 +1374,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1388,12 +1388,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1402,12 +1402,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, @@ -1417,12 +1417,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, @@ -1432,12 +1432,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1446,12 +1446,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1460,12 +1460,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, @@ -1475,12 +1475,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin, @@ -1490,12 +1490,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1504,12 +1504,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,12 +1518,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin, @@ -1548,12 +1548,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1562,12 +1562,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1576,12 +1576,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, @@ -1591,12 +1591,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin, @@ -1606,12 +1606,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1620,12 +1620,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1634,12 +1634,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, @@ -1649,12 +1649,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin, @@ -1664,12 +1664,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1678,12 +1678,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1692,12 +1692,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1707,12 +1707,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, @@ -1722,12 +1722,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1737,12 +1737,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1751,12 +1751,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1766,12 +1766,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, @@ -1781,12 +1781,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1796,12 +1796,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1810,12 +1810,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, @@ -1825,12 +1825,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, @@ -1840,12 +1840,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1854,12 +1854,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1868,12 +1868,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, @@ -1898,12 +1898,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1912,12 +1912,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1926,12 +1926,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, @@ -1941,12 +1941,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], 
[[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, @@ -1956,12 +1956,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1970,12 +1970,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1984,12 +1984,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, @@ -1999,12 +1999,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, @@ -2014,12 +2014,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2028,12 +2028,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2042,12 +2042,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -2057,12 +2057,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, @@ -2072,12 +2072,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -2087,12 +2087,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2101,12 +2101,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, @@ -2116,12 +2116,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, @@ -2131,12 +2131,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2145,12 +2145,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2159,12 +2159,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, @@ -2174,12 +2174,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t 
op1, uint32_t op2, @@ -2189,12 +2189,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2203,12 +2203,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2217,12 +2217,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, @@ -2232,12 +2232,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, @@ -2247,12 +2247,12 @@ // 
CHECK-RV32-LABEL: @test_vmadc_vv_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, @@ -2290,12 +2290,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, @@ -2305,12 +2305,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u32m8_b4( 
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2319,12 +2319,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2333,12 +2333,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, @@ -2348,12 +2348,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, @@ -2363,12 +2363,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u64m1_b64( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2377,12 +2377,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2391,12 +2391,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, @@ -2406,12 +2406,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, @@ -2421,12 +2421,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2435,12 +2435,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2449,12 +2449,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, @@ -2464,12 +2464,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, @@ -2479,12 +2479,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2493,12 +2493,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2507,12 +2507,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vvm_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, @@ -2522,12 +2522,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vxm_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[CARRYIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, @@ -2537,12 +2537,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vv_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2551,12 +2551,12 @@ // CHECK-RV32-LABEL: @test_vmadc_vx_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t 
vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( 
[[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( 
[[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, 
vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: 
@test_vmadd_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: 
@test_vmadd_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: 
@test_vmadd_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: 
@test_vmadd_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: 
@test_vmadd_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmadd.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv16i1.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmand_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmand_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmand.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { @@ 
-105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmandnot_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmandnot_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmandnot_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmandnot_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmandnot_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmandnot_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmandnot_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmandnot_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmandnot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: 
@test_vmax_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmax_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ 
-399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmax.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t 
vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmaxu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // 
CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmax_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 
+1925,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmaxu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t 
mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t 
mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, @@ -294,12 +294,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, @@ -309,12 +309,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, @@ -324,12 +324,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, @@ -339,12 +339,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, @@ -354,12 +354,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, @@ -369,12 +369,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, @@ -384,12 +384,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, @@ -414,12 +414,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // 
// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, @@ -489,12 +489,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, @@ -504,12 +504,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, @@ -519,12 +519,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, @@ -549,12 +549,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, @@ -564,12 +564,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, @@ -579,12 +579,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, @@ -594,12 +594,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, @@ -624,12 +624,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i32( [[OP1:%.*]], 
i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, @@ -669,12 +669,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmerge.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, @@ -714,12 +714,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, @@ -729,12 +729,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, @@ -744,12 +744,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, @@ -759,12 +759,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, @@ -774,12 +774,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, @@ -789,12 +789,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, @@ -804,12 +804,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u8m2( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, @@ -834,12 +834,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, @@ -849,12 +849,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, @@ -864,12 +864,12 @@ // 
CHECK-RV32-LABEL: @test_vmerge_vxm_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, @@ -879,12 +879,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -894,12 +894,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, @@ -909,12 +909,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, 
vuint16mf2_t op1, @@ -924,12 +924,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, @@ -939,12 +939,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, @@ -954,12 +954,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, @@ -969,12 +969,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, @@ -984,12 +984,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, @@ -999,12 +999,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, @@ -1014,12 +1014,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, @@ -1044,12 +1044,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, @@ -1059,12 +1059,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -1074,12 +1074,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, @@ -1089,12 +1089,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, @@ -1104,12 +1104,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, @@ -1119,12 +1119,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, @@ -1134,12 +1134,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, @@ -1149,12 +1149,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, @@ -1164,12 +1164,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, @@ -1179,12 +1179,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, @@ -1194,12 +1194,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, @@ -1209,12 +1209,12 @@ // CHECK-RV32-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
@@ -1224,12 +1224,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
@@ -1239,12 +1239,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
@@ -1254,12 +1254,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
@@ -1269,12 +1269,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
@@ -1284,12 +1284,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
@@ -1299,12 +1299,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
@@ -1314,12 +1314,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmerge.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
@@ -1329,12 +1329,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
@@ -1344,12 +1344,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
@@ -1359,12 +1359,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
@@ -1374,12 +1374,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
@@ -1389,12 +1389,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
@@ -1404,12 +1404,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
@@ -1419,12 +1419,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
@@ -1434,12 +1434,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
@@ -1449,12 +1449,12 @@
// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
@@ -24,12 +24,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -38,12 +38,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
@@ -53,12 +53,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
@@ -67,12 +67,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
@@ -82,12 +82,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
@@ -96,12 +96,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
@@ -110,12 +110,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -124,12 +124,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
@@ -138,12 +138,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
@@ -152,12 +152,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
@@ -167,12 +167,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
@@ -181,12 +181,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
@@ -196,12 +196,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
@@ -210,12 +210,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
@@ -225,12 +225,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
@@ -239,12 +239,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
@@ -253,12 +253,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -267,12 +267,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -283,12 +283,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -298,12 +298,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -314,12 +314,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -329,12 +329,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -345,12 +345,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -360,12 +360,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -376,12 +376,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -391,12 +391,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -407,12 +407,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -422,12 +422,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -438,12 +438,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -453,12 +453,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -469,12 +469,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -484,12 +484,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -500,12 +500,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -515,12 +515,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -531,12 +531,12 @@
// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -23,12 +23,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
@@ -37,12 +37,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
@@ -51,12 +51,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -65,12 +65,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
@@ -79,12 +79,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
@@ -93,12 +93,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
@@ -107,12 +107,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
@@ -121,12 +121,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
@@ -135,12 +135,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -150,12 +150,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -165,12 +165,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -180,12 +180,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -195,12 +195,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -210,12 +210,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -225,12 +225,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -240,12 +240,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -255,12 +255,12 @@
// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -9,12 +9,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
@@ -23,12 +23,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
@@ -37,12 +37,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
@@ -51,12 +51,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
@@ -65,12 +65,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
@@ -79,12 +79,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
@@ -93,12 +93,12 @@
// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vmfgt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -150,12 +150,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -165,12 +165,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -180,12 +180,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -195,12 +195,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfgt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -240,12 +240,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -255,12 +255,12 @@ // CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -110,12 +110,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f32.f32.i64( [[OP1:%.*]], float 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { @@ -124,12 +124,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { @@ -152,12 +152,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, @@ -167,12 +167,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, @@ -196,12 +196,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { @@ -239,12 +239,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -253,12 +253,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { @@ -267,12 +267,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -283,12 +283,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -298,12 +298,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -314,12 +314,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -360,12 +360,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -376,12 +376,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -391,12 +391,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -407,12 +407,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -422,12 +422,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -438,12 +438,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -453,12 +453,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -484,12 +484,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -515,12 +515,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -531,12 +531,12 @@ // CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -110,12 +110,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { @@ -124,12 +124,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { @@ -152,12 +152,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, @@ -167,12 +167,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, @@ -196,12 +196,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { @@ -239,12 +239,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -253,12 +253,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { @@ -267,12 +267,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -283,12 +283,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -298,12 +298,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -314,12 +314,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -360,12 +360,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -376,12 +376,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -391,12 +391,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -407,12 +407,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -422,12 +422,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -438,12 +438,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmflt.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -453,12 +453,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -484,12 +484,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -515,12 +515,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -531,12 +531,12 @@ // CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -110,12 +110,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) { @@ -124,12 +124,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) { @@ -152,12 +152,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2, @@ -167,12 +167,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) { @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2, 
@@ -196,12 +196,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) { @@ -210,12 +210,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2, @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) { @@ -239,12 +239,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -253,12 +253,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { @@ -267,12 +267,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -283,12 +283,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -298,12 +298,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -314,12 +314,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -360,12 +360,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -376,12 +376,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -391,12 +391,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -407,12 +407,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t 
mask, vbool4_t maskedoff, @@ -422,12 +422,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -438,12 +438,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -453,12 +453,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmfne_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -484,12 +484,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -515,12 +515,12 @@ // CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -531,12 +531,12 @@ // 
CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t 
op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vmin_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmin.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vmin_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmin_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8m1( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vminu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vminu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vminu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1029,12 +1029,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -1043,12 +1043,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1057,12 +1057,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -1071,12 +1071,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1085,12 +1085,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -1099,12 +1099,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1113,12 +1113,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -1127,12 +1127,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1141,12 +1141,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -1155,12 +1155,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1169,12 +1169,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -1183,12 +1183,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1197,12 +1197,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -1211,12 +1211,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1225,12 +1225,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -1239,12 +1239,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -1253,12 +1253,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -1267,12 +1267,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -1281,12 +1281,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -1295,12 +1295,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -1309,12 +1309,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -1323,12 +1323,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -1337,12 +1337,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -1351,12 +1351,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -1365,12 +1365,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -1379,12 +1379,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -1393,12 +1393,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -1407,12 +1407,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -1421,12 +1421,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -1435,12 +1435,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -1449,12 +1449,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -1463,12 +1463,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -1477,12 +1477,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -1491,12 +1491,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -1505,12 +1505,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
@@ -1519,12 +1519,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -1533,12 +1533,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
@@ -1547,12 +1547,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1561,12 +1561,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
@@ -1575,12 +1575,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1589,12 +1589,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
@@ -1603,12 +1603,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1617,12 +1617,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -1631,12 +1631,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1645,12 +1645,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
@@ -1659,12 +1659,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1673,12 +1673,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
@@ -1687,12 +1687,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1701,12 +1701,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
@@ -1715,12 +1715,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1729,12 +1729,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
@@ -1743,12 +1743,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1757,12 +1757,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
@@ -1771,12 +1771,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1785,12 +1785,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
@@ -1799,12 +1799,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1813,12 +1813,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
@@ -1827,12 +1827,12 @@
// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1841,12 +1841,12 @@
// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
@@ -1855,12 +1855,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1869,12 +1869,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
@@ -1883,12 +1883,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1897,12 +1897,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
@@ -1911,12 +1911,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1925,12 +1925,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
@@ -1939,12 +1939,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1953,12 +1953,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
@@ -1967,12 +1967,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1981,12 +1981,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
@@ -1995,12 +1995,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -2009,12 +2009,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
@@ -2023,12 +2023,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -2037,12 +2037,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
@@ -2051,12 +2051,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -2065,12 +2065,12 @@
// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
@@ -2079,12 +2079,12 @@
// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]],
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // 
CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: 
@test_vminu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnand_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnand_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnand_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmnand_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnand_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnand_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmnand_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnand_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnand.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmnor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmnor_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmnor_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmnor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmor_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmor_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmornot.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmornot_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmornot_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmornot.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64( [[OP1:%.*]], 
i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, @@ -80,12 +80,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2, @@ -95,12 +95,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -109,12 +109,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -123,12 +123,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2, @@ -153,12 +153,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -167,12 +167,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, @@ -196,12 +196,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin, @@ -211,12 +211,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -225,12 +225,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { @@ -239,12 +239,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, @@ -254,12 +254,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t 
op2, vbool4_t borrowin, @@ -269,12 +269,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -283,12 +283,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -297,12 +297,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, @@ -312,12 +312,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin, @@ -327,12 +327,12 @@ // 
CHECK-RV32-LABEL: @test_vmsbc_vv_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -341,12 +341,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -355,12 +355,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, @@ -370,12 +370,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin, @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i8m8_b1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -428,12 +428,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2, @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -531,12 +531,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2, @@ -546,12 +546,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2, @@ -561,12 +561,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -575,12 +575,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -589,12 +589,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2, @@ -604,12 +604,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin, @@ -619,12 +619,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -633,12 +633,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -647,12 +647,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2, @@ -662,12 +662,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, @@ -677,12 +677,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -691,12 +691,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -705,12 +705,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2, @@ -720,12 +720,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin, @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -778,12 +778,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2, @@ -793,12 +793,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -808,12 +808,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -822,12 +822,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2, @@ -837,12 +837,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2, @@ -852,12 +852,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -866,12 +866,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -880,12 +880,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2, @@ -895,12 +895,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2, @@ -910,12 +910,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -924,12 +924,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -938,12 +938,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2, @@ -953,12 +953,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin, @@ -968,12 +968,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -996,12 +996,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2, @@ -1011,12 +1011,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin, @@ -1026,12 +1026,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1054,12 +1054,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2, @@ -1069,12 +1069,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2, @@ -1084,12 +1084,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1098,12 +1098,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -1112,12 +1112,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2, @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2, @@ -1142,12 +1142,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmsbc_vv_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1156,12 +1156,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -1170,12 +1170,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2, @@ -1185,12 +1185,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2, @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -1228,12 +1228,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, @@ -1258,12 +1258,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1272,12 +1272,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -1286,12 +1286,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, @@ -1301,12 +1301,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, @@ -1316,12 +1316,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1330,12 +1330,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1344,12 +1344,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, @@ -1359,12 +1359,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, @@ -1374,12 +1374,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1388,12 +1388,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1402,12 +1402,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, @@ -1417,12 +1417,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, @@ -1432,12 +1432,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1446,12 +1446,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1460,12 +1460,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, @@ -1475,12 +1475,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, @@ -1490,12 +1490,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1504,12 +1504,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1518,12 +1518,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, @@ -1548,12 +1548,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1562,12 +1562,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1576,12 +1576,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, @@ -1591,12 +1591,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, @@ -1606,12 +1606,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, 
size_t vl) { @@ -1620,12 +1620,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1634,12 +1634,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, @@ -1649,12 +1649,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, @@ -1664,12 +1664,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1678,12 +1678,12 @@ // CHECK-RV32-LABEL: 
@test_vmsbc_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1692,12 +1692,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1707,12 +1707,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, @@ -1722,12 +1722,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1737,12 +1737,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u16mf4_b64( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1751,12 +1751,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1766,12 +1766,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, @@ -1781,12 +1781,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1796,12 +1796,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u16mf2_b32( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1810,12 +1810,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, @@ -1825,12 +1825,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2, @@ -1840,12 +1840,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1854,12 +1854,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1868,12 +1868,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2, @@ -1898,12 +1898,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1912,12 +1912,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1926,12 +1926,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, @@ -1941,12 +1941,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2, @@ -1956,12 +1956,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1970,12 +1970,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1984,12 +1984,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, @@ -1999,12 +1999,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2, @@ -2014,12 +2014,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2028,12 +2028,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2042,12 +2042,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -2057,12 +2057,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, @@ -2072,12 +2072,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -2087,12 +2087,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2101,12 +2101,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, @@ -2116,12 +2116,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2, @@ -2131,12 +2131,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2145,12 +2145,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2159,12 +2159,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, @@ -2174,12 +2174,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2, @@ -2189,12 +2189,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2203,12 +2203,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2217,12 +2217,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, @@ -2232,12 +2232,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2, @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i32( 
[[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, @@ -2290,12 +2290,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2, @@ -2305,12 +2305,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2319,12 +2319,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2333,12 +2333,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, @@ -2348,12 +2348,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2, @@ -2363,12 +2363,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2377,12 +2377,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2391,12 +2391,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, @@ -2406,12 +2406,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2, @@ -2421,12 +2421,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2435,12 +2435,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2449,12 +2449,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, @@ -2464,12 +2464,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2, @@ -2479,12 +2479,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2493,12 +2493,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2507,12 +2507,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, @@ -2522,12 +2522,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2, @@ -2537,12 +2537,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vv_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2551,12 +2551,12 @@ // CHECK-RV32-LABEL: @test_vmsbc_vx_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsbc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c @@ -7,96 +7,96 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); } // CHECK-RV32-LABEL: @test_vmsbf_m_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, @@ -106,12 +106,12 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmsbf_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, @@ -136,12 +136,12 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, @@ -151,12 +151,12 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, @@ -166,12 +166,12 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmsbf_m_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsbf_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsbf.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t 
op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -486,12 +486,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -514,12 +514,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -570,12 +570,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: 
@test_vmseq_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -598,12 +598,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -612,12 +612,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -626,12 +626,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -640,12 +640,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -668,12 +668,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -682,12 +682,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -696,12 +696,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -710,12 +710,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -724,12 +724,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -738,12 +738,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -752,12 +752,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -766,12 +766,12 @@ // CHECK-RV32-LABEL: 
@test_vmseq_vv_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -780,12 +780,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -794,12 +794,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -808,12 +808,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -822,12 +822,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -837,12 +837,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -851,12 +851,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -866,12 +866,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -880,12 +880,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -894,12 +894,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -908,12 +908,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -922,12 +922,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -936,12 +936,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, 
vuint16m4_t op2, size_t vl) { @@ -950,12 +950,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -964,12 +964,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -978,12 +978,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -992,12 +992,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1007,12 +1007,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1021,12 +1021,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1035,12 +1035,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1049,12 +1049,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1063,12 +1063,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmseq_vx_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1077,12 +1077,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1091,12 +1091,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1105,12 +1105,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1119,12 +1119,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1133,12 +1133,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1161,12 +1161,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1175,12 +1175,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1189,12 +1189,12 @@ // 
CHECK-RV32-LABEL: @test_vmseq_vv_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1203,12 +1203,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1217,12 +1217,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1231,12 +1231,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1245,12 +1245,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1260,12 +1260,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1275,12 +1275,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1290,12 +1290,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1305,12 +1305,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1320,12 +1320,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1335,12 +1335,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1350,12 +1350,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1380,12 +1380,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1395,12 +1395,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t 
maskedoff, @@ -1410,12 +1410,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1425,12 +1425,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1440,12 +1440,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1455,12 +1455,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1471,12 +1471,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1486,12 +1486,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1502,12 +1502,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1517,12 +1517,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m1_b16_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1532,12 +1532,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1562,12 +1562,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1577,12 +1577,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1592,12 +1592,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1607,12 +1607,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1622,12 +1622,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1637,12 +1637,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1653,12 +1653,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1668,12 +1668,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1683,12 +1683,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1698,12 +1698,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1713,12 +1713,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1728,12 +1728,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1758,12 +1758,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1773,12 +1773,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1788,12 +1788,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1803,12 +1803,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1818,12 +1818,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1833,12 +1833,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1848,12 +1848,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1863,12 +1863,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1878,12 +1878,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_i64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1893,12 +1893,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_i64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1908,12 +1908,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1924,12 +1924,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1955,12 +1955,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1970,12 +1970,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1986,12 +1986,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2001,12 +2001,12 @@ // CHECK-RV32-LABEL: 
@test_vmseq_vv_u8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2016,12 +2016,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2031,12 +2031,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2046,12 +2046,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2061,12 +2061,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2076,12 +2076,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2091,12 +2091,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -2106,12 +2106,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2137,12 +2137,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2153,12 +2153,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2169,12 +2169,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2185,12 +2185,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2201,12 +2201,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2216,12 +2216,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2231,12 +2231,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2246,12 +2246,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2276,12 +2276,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2291,12 +2291,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2306,12 +2306,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2322,12 +2322,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2338,12 +2338,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2354,12 +2354,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2369,12 +2369,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2385,12 +2385,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2400,12 +2400,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2430,12 +2430,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2445,12 +2445,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2460,12 +2460,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2476,12 +2476,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2491,12 +2491,12 @@ 
// CHECK-RV32-LABEL: @test_vmseq_vv_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2507,12 +2507,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2522,12 +2522,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2538,12 +2538,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2553,12 +2553,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vv_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2568,12 +2568,12 @@ // CHECK-RV32-LABEL: @test_vmseq_vx_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // 
CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmsgt.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: 
@test_vmsgt_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgt.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vmsgt_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsgtu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i16.i16.i32( 
[[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vmsgtu_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
@@ -511,12 +511,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m1_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
@@ -525,12 +525,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
@@ -539,12 +539,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m4_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
@@ -553,12 +553,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m8_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
@@ -567,12 +567,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m1_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
@@ -581,12 +581,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m2_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
@@ -595,12 +595,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m4_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
@@ -609,12 +609,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m8_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
@@ -623,12 +623,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -638,12 +638,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -653,12 +653,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -668,12 +668,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m1_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -683,12 +683,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m2_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -698,12 +698,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m4_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -713,12 +713,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m8_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
@@ -728,12 +728,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -743,12 +743,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -758,12 +758,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m1_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -773,12 +773,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m2_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -788,12 +788,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m4_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -803,12 +803,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m8_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -818,12 +818,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -833,12 +833,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -848,12 +848,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -863,12 +863,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -878,12 +878,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -893,12 +893,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -908,12 +908,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -923,12 +923,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -938,12 +938,12 @@
// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -953,12 +953,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -968,12 +968,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -983,12 +983,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -998,12 +998,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1013,12 +1013,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1028,12 +1028,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -1043,12 +1043,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
@@ -1058,12 +1058,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1074,12 +1074,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1090,12 +1090,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1105,12 +1105,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1120,12 +1120,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1135,12 +1135,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -1150,12 +1150,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1166,12 +1166,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1181,12 +1181,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1196,12 +1196,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1211,12 +1211,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1226,12 +1226,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1241,12 +1241,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1256,12 +1256,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1271,12 +1271,12 @@
// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -7,96 +7,96 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV32-LABEL: @test_vmsif_m_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@@ -106,12 +106,12 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@@ -121,12 +121,12 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@@ -136,12 +136,12 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@@ -151,12 +151,12 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@@ -166,12 +166,12 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@@ -181,12 +181,12 @@
// CHECK-RV32-LABEL: @test_vmsif_m_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsif.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -77,12 +77,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -91,12 +91,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -105,12 +105,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -119,12 +119,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -133,12 +133,12 @@
// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.nxv1i16.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmsle_vv_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: 
@test_vmsle_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -486,12 +486,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsle.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -514,12 +514,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -570,12 +570,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -598,12 +598,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -612,12 +612,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -626,12 +626,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsleu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, @@ -641,12 +641,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -655,12 +655,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, @@ -670,12 +670,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmsleu_vv_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -713,12 +713,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -727,12 +727,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -741,12 +741,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -755,12 +755,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -769,12 +769,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -783,12 +783,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -797,12 +797,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -811,12 +811,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -825,12 +825,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -840,12 +840,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -854,12 +854,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -869,12 +869,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -883,12 +883,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, @@ -898,12 +898,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -912,12 +912,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -926,12 +926,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -940,12 +940,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -954,12 +954,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -968,12 +968,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t 
test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -996,12 +996,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1011,12 +1011,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1025,12 +1025,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1054,12 +1054,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsleu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, @@ -1069,12 +1069,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1083,12 +1083,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1097,12 +1097,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1111,12 +1111,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1125,12 +1125,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1139,12 +1139,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, @@ -1154,12 +1154,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1168,12 +1168,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, @@ -1212,12 +1212,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1226,12 +1226,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1240,12 +1240,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1254,12 +1254,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1269,12 +1269,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1284,12 +1284,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1299,12 +1299,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1314,12 +1314,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1329,12 +1329,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1344,12 +1344,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1359,12 +1359,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1374,12 +1374,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1389,12 +1389,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1404,12 +1404,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1419,12 +1419,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1434,12 +1434,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1464,12 +1464,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1480,12 +1480,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1495,12 +1495,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t 
test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1511,12 +1511,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1526,12 +1526,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1541,12 +1541,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1556,12 +1556,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1571,12 +1571,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1586,12 +1586,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1601,12 +1601,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1616,12 +1616,12 @@ // 
CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1646,12 +1646,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1662,12 +1662,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1677,12 +1677,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1692,12 +1692,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1707,12 +1707,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1722,12 +1722,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16_m( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1737,12 +1737,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1752,12 +1752,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1767,12 +1767,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1782,12 +1782,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1797,12 +1797,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1812,12 +1812,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1842,12 +1842,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1857,12 +1857,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1872,12 +1872,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1887,12 +1887,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1902,12 +1902,12 @@ // CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsle.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1917,12 +1917,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1933,12 +1933,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1948,12 +1948,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1964,12 +1964,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1979,12 +1979,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2010,12 +2010,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2025,12 +2025,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2040,12 +2040,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2055,12 +2055,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2070,12 +2070,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2085,12 +2085,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t 
test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2100,12 +2100,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -2115,12 +2115,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -2130,12 +2130,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2146,12 +2146,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2162,12 +2162,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2178,12 +2178,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2194,12 +2194,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t 
maskedoff, @@ -2210,12 +2210,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2225,12 +2225,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2241,12 +2241,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2256,12 +2256,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmsleu_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2272,12 +2272,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2287,12 +2287,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2318,12 
+2318,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2334,12 +2334,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2350,12 +2350,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2366,12 +2366,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2381,12 +2381,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2397,12 +2397,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2412,12 +2412,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2428,12 +2428,12 @@ // CHECK-RV32-LABEL: 
@test_vmsleu_vx_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2459,12 +2459,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2474,12 +2474,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2490,12 +2490,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2505,12 +2505,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2521,12 +2521,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2536,12 +2536,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16_m( 
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2552,12 +2552,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2567,12 +2567,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2583,12 +2583,12 @@ // CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -486,12 +486,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -514,12 +514,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: 
@test_vmslt_vv_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -570,12 +570,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -598,12 +598,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -612,12 +612,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -626,12 +626,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, @@ -641,12 +641,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -655,12 +655,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, @@ -670,12 +670,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -713,12 +713,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t 
test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -727,12 +727,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -741,12 +741,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -755,12 +755,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -769,12 +769,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -783,12 +783,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.i8.i32( [[OP1:%.*]], i8 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -797,12 +797,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -811,12 +811,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -825,12 +825,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -840,12 +840,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -854,12 +854,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -869,12 +869,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -883,12 +883,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, @@ -898,12 +898,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -912,12 +912,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -926,12 +926,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -940,12 +940,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -954,12 +954,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -968,12 
+968,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -996,12 +996,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1011,12 +1011,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1025,12 +1025,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1054,12 +1054,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, @@ -1069,12 +1069,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1083,12 +1083,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1097,12 +1097,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1111,12 +1111,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1125,12 +1125,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1139,12 +1139,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, @@ -1154,12 +1154,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1168,12 +1168,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, @@ -1212,12 +1212,12 @@ // 
CHECK-RV32-LABEL: @test_vmsltu_vx_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1226,12 +1226,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1240,12 +1240,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1254,12 +1254,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1269,12 +1269,12 @@ // CHECK-RV32-LABEL: 
@test_vmslt_vx_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1284,12 +1284,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1299,12 +1299,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vx_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1314,12 +1314,12 @@ // CHECK-RV32-LABEL: @test_vmslt_vv_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1329,12 +1329,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1344,12 +1344,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i8m1_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1359,12 +1359,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i8m1_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1374,12 +1374,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i8m2_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1389,12 +1389,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i8m2_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1404,12 +1404,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i8m4_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -1419,12 +1419,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i8m4_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -1434,12 +1434,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i8m8_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
@@ -1449,12 +1449,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i8m8_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
@@ -1464,12 +1464,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf4_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1480,12 +1480,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf4_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1495,12 +1495,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1511,12 +1511,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1526,12 +1526,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i16m1_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1541,12 +1541,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i16m1_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1556,12 +1556,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i16m2_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1571,12 +1571,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i16m2_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1586,12 +1586,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i16m4_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1601,12 +1601,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i16m4_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1616,12 +1616,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i16m8_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -1631,12 +1631,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i16m8_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -1646,12 +1646,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1662,12 +1662,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1677,12 +1677,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1692,12 +1692,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i32m1_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1707,12 +1707,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1722,12 +1722,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i32m2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1737,12 +1737,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1752,12 +1752,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i32m4_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1767,12 +1767,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1782,12 +1782,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i32m8_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -1797,12 +1797,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1812,12 +1812,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i64m1_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1827,12 +1827,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1842,12 +1842,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i64m2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1857,12 +1857,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1872,12 +1872,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i64m4_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1887,12 +1887,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vv_i64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1902,12 +1902,12 @@
// CHECK-RV32-LABEL: @test_vmslt_vx_i64m8_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -1917,12 +1917,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1933,12 +1933,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -1948,12 +1948,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1964,12 +1964,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -1979,12 +1979,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -1995,12 +1995,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -2010,12 +2010,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m1_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -2025,12 +2025,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m1_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -2040,12 +2040,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m2_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -2055,12 +2055,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m2_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -2070,12 +2070,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m4_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -2085,12 +2085,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m4_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -2100,12 +2100,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m8_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
@@ -2115,12 +2115,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m8_b1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
@@ -2130,12 +2130,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -2146,12 +2146,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -2162,12 +2162,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -2178,12 +2178,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
@@ -2194,12 +2194,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m1_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -2210,12 +2210,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m1_b16_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
@@ -2225,12 +2225,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m2_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -2241,12 +2241,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m2_b8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
@@ -2256,12 +2256,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m4_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -2272,12 +2272,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m4_b4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
@@ -2287,12 +2287,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m8_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -2303,12 +2303,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m8_b2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
@@ -2318,12 +2318,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
@@ -2334,12 +2334,12 @@
// CHECK-RV32-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2350,12 +2350,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2366,12 +2366,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2381,12 +2381,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2397,12 +2397,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2412,12 +2412,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2428,12 +2428,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t 
test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2459,12 +2459,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2474,12 +2474,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2490,12 +2490,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2505,12 +2505,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2521,12 +2521,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2536,12 +2536,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2552,12 +2552,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, 
vbool16_t maskedoff, @@ -2567,12 +2567,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vv_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2583,12 +2583,12 @@ // CHECK-RV32-LABEL: @test_vmsltu_vx_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) { @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -486,12 +486,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) { @@ -514,12 +514,12 @@ // 
CHECK-RV32-LABEL: @test_vmsne_vv_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) { @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) { @@ -570,12 +570,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) { @@ -598,12 +598,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -612,12 +612,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -626,12 +626,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -640,12 +640,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8mf8_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -668,12 +668,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8mf4_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -682,12 +682,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t 
test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -696,12 +696,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8mf2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -710,12 +710,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -724,12 +724,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m1_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -738,12 +738,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -752,12 +752,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m2_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -766,12 +766,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -780,12 +780,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m4_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -794,12 +794,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -808,12 +808,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m8_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -822,12 +822,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2, @@ -837,12 +837,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16mf4_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -851,12 +851,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2, @@ -866,12 +866,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16mf2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -880,12 +880,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -894,12 +894,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m1_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -908,12 +908,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -922,12 +922,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m2_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -936,12 +936,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -950,12 +950,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m4_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -964,12 +964,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -978,12 +978,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m8_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -992,12 +992,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1007,12 +1007,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32mf2_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1021,12 +1021,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1035,12 +1035,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m1_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1049,12 +1049,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1063,12 +1063,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m2_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1077,12 +1077,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1091,12 +1091,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m4_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1105,12 +1105,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t 
op2, size_t vl) { @@ -1119,12 +1119,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m8_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1133,12 +1133,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m1_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1161,12 +1161,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1175,12 +1175,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m2_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1189,12 +1189,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1203,12 +1203,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m4_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1217,12 +1217,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1231,12 +1231,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m8_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1245,12 +1245,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1260,12 +1260,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1275,12 +1275,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1290,12 +1290,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1305,12 +1305,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1320,12 +1320,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1335,12 +1335,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1350,12 +1350,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1380,12 +1380,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1395,12 +1395,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1410,12 +1410,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1425,12 +1425,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1440,12 +1440,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -1455,12 +1455,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1471,12 +1471,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1486,12 +1486,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1502,12 +1502,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1517,12 +1517,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1532,12 +1532,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t 
mask, vbool8_t maskedoff, @@ -1562,12 +1562,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1577,12 +1577,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1592,12 +1592,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1607,12 +1607,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmsne_vv_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1622,12 +1622,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -1637,12 +1637,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1653,12 +1653,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1668,12 +1668,12 @@ // 
CHECK-RV32-LABEL: @test_vmsne_vv_i32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1683,12 +1683,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1698,12 +1698,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1713,12 +1713,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1728,12 +1728,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1758,12 +1758,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1773,12 +1773,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i32m8_b4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -1788,12 +1788,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1803,12 +1803,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1818,12 +1818,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1833,12 +1833,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1848,12 +1848,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1863,12 +1863,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1878,12 +1878,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_i64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1893,12 +1893,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_i64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -1908,12 +1908,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1924,12 +1924,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8mf8_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1955,12 +1955,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8mf4_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -1970,12 +1970,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -1986,12 +1986,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8mf2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2001,12 +2001,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2016,12 +2016,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m1_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2031,12 +2031,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2046,12 +2046,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m2_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2061,12 +2061,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2076,12 +2076,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m4_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2091,12 +2091,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -2106,12 +2106,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u8m8_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2137,12 +2137,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16mf4_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2153,12 +2153,12 
@@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2169,12 +2169,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16mf2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2185,12 +2185,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2201,12 +2201,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m1_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2216,12 +2216,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2231,12 +2231,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m2_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2246,12 +2246,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m4_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m4_b4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2276,12 +2276,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2291,12 +2291,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u16m8_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, @@ -2306,12 +2306,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2322,12 +2322,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32mf2_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2338,12 +2338,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2354,12 +2354,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m1_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2369,12 +2369,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2385,12 +2385,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m2_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2400,12 +2400,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m4_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2430,12 +2430,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2445,12 +2445,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u32m8_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, @@ -2460,12 +2460,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2476,12 +2476,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m1_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, @@ -2491,12 +2491,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2507,12 +2507,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m2_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, @@ -2522,12 +2522,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2538,12 +2538,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m4_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, @@ -2553,12 +2553,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vv_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, @@ -2568,12 +2568,12 @@ // CHECK-RV32-LABEL: @test_vmsne_vx_u64m8_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c @@ -7,96 +7,96 @@ // CHECK-RV32-LABEL: 
@test_vmsof_m_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b32( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); } // CHECK-RV32-LABEL: @test_vmsof_m_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv64i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, @@ -106,12 +106,12 @@ // CHECK-RV32-LABEL: @test_vmsof_m_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv32i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vmsof_m_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv16i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, @@ -136,12 +136,12 @@ // CHECK-RV32-LABEL: @test_vmsof_m_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv8i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, @@ -151,12 +151,12 @@ // CHECK-RV32-LABEL: @test_vmsof_m_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv4i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1, @@ -166,12 +166,12 @@ // CHECK-RV32-LABEL: @test_vmsof_m_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv2i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1, @@ -181,12 +181,12 @@ // CHECK-RV32-LABEL: @test_vmsof_m_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmsof_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmsof.mask.nxv1i1.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], 
i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // 
CHECK-RV32-LABEL: @test_vmul_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, 
size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i32( [[OP1:%.*]], i8 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmulh.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m8( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t 
op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmulhu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // 
CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -2471,12 +2471,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -2485,12 +2485,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -2499,12 +2499,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -2513,12 +2513,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -2527,12 +2527,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -2541,12 +2541,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -2555,12 +2555,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -2569,12 +2569,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -2583,12 +2583,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -2597,12 +2597,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -2611,12 +2611,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2625,12 +2625,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -2639,12 +2639,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2653,12 +2653,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) { @@ -2667,12 +2667,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2681,12 +2681,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2695,12 +2695,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2709,12 +2709,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2723,12 +2723,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // 
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2737,12 +2737,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -2751,12 +2751,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2765,12 +2765,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -2779,12 +2779,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2793,12 +2793,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -2807,12 +2807,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2821,12 +2821,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) { @@ -2835,12 +2835,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t 
test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2849,12 +2849,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2863,12 +2863,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2877,12 +2877,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -2891,12 +2891,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2905,12 +2905,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -2919,12 +2919,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2933,12 +2933,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -2947,12 +2947,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2961,12 +2961,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) { @@ -2975,12 +2975,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2989,12 +2989,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) { @@ -3003,12 +3003,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -3017,12 +3017,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) { @@ -3031,12 +3031,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -3045,12 +3045,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) { @@ -3059,12 +3059,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -3073,12 +3073,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) { @@ -3087,12 +3087,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -3101,12 +3101,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -3115,12 +3115,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -3129,12 +3129,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -3143,12 +3143,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -3157,12 +3157,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -3171,12 +3171,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -3185,12 +3185,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -3199,12 +3199,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -3213,12 +3213,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -3227,12 +3227,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -3241,12 +3241,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -3255,12 +3255,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -3269,12 +3269,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -3283,12 +3283,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -3297,12 +3297,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -3311,12 +3311,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -3325,12 +3325,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -3339,12 +3339,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -3353,12 +3353,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -3367,12 +3367,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -3381,12 +3381,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -3395,12 +3395,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -3409,12 +3409,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -3423,12 +3423,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -3437,12 +3437,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -3451,12 +3451,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -3465,12 +3465,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -3479,12 +3479,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -3493,12 +3493,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -3507,12 +3507,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -3521,12 
+3521,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -3535,12 +3535,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -3549,12 +3549,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -3563,12 +3563,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -3577,12 +3577,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -3591,12 +3591,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -3605,12 +3605,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -3619,12 +3619,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -3633,12 +3633,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -3647,12 +3647,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -3661,12 +3661,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -3675,12 +3675,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -3689,12 +3689,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -3703,12 +3703,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -3717,12 +3717,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -3731,12 +3731,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -3745,12 +3745,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -3759,12 +3759,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -3773,12 +3773,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -3787,12 +3787,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -3801,12 +3801,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -3815,12 +3815,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -3829,12 +3829,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -3843,12 +3843,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -3857,12 +3857,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -3871,12 +3871,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -3885,12 +3885,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -3899,12 +3899,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -3913,12 +3913,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -3927,12 +3927,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -3941,12 +3941,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -3955,12 +3955,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -3969,12 +3969,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -3983,12 +3983,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -3997,12 +3997,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -4011,12 +4011,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -4025,12 +4025,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -4039,12 +4039,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t 
maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -4053,12 +4053,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -4067,12 +4067,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -4081,12 +4081,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -4095,12 +4095,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -4109,12 +4109,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -4123,12 +4123,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -4137,12 +4137,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -4151,12 +4151,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -4165,12 +4165,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -4179,12 +4179,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -4193,12 +4193,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -4207,12 +4207,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -4221,12 +4221,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -4235,12 +4235,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -4249,12 +4249,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -4263,12 +4263,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -4277,12 +4277,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -4291,12 +4291,12 @@ // CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -4305,12 +4305,12 @@ // CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -4319,12 +4319,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -4333,12 +4333,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -4347,12 +4347,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -4361,12 +4361,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -4375,12 +4375,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t 
vl) { @@ -4389,12 +4389,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -4403,12 +4403,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -4417,12 +4417,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -4431,12 +4431,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -4445,12 +4445,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -4459,12 +4459,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -4473,12 +4473,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -4487,12 +4487,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -4501,12 +4501,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -4515,12 +4515,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -4529,12 +4529,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -4543,12 +4543,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -4557,12 +4557,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -4571,12 +4571,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -4585,12 +4585,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -4599,12 +4599,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -4613,12 +4613,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -4627,12 +4627,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -4641,12 +4641,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -4655,12 +4655,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -4669,12 +4669,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -4683,12 +4683,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -4697,12 +4697,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -4711,12 +4711,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t 
maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -4725,12 +4725,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -4739,12 +4739,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -4753,12 +4753,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -4767,12 +4767,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -4781,12 +4781,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -4795,12 +4795,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -4809,12 +4809,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -4823,12 +4823,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -4837,12 +4837,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -4851,12 +4851,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -4865,12 +4865,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -4879,12 +4879,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -4893,12 +4893,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -4907,12 +4907,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -4921,12 +4921,12 @@ // CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -4935,12 +4935,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -4949,12 +4949,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -4963,12 +4963,12 @@ // CHECK-RV32-LABEL: 
@test_vmulhu_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -4977,12 +4977,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -4991,12 +4991,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -5005,12 +5005,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -5019,12 +5019,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -5033,12 +5033,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -5047,12 +5047,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -5061,12 +5061,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -5075,12 +5075,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -5089,12 +5089,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -5103,12 +5103,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -5117,12 +5117,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -5131,12 +5131,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -5145,12 +5145,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -5159,12 +5159,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -5173,12 +5173,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -5187,12 +5187,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -5201,12 +5201,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m( 
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -5215,12 +5215,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -5229,12 +5229,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -5243,12 +5243,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -5257,12 +5257,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -5271,12 +5271,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -5285,12 +5285,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -5299,12 +5299,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -5313,12 +5313,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -5327,12 +5327,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -5341,12 +5341,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -5355,12 +5355,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -5369,12 +5369,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -5383,12 +5383,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -5397,12 +5397,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -5411,12 +5411,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -5425,12 +5425,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -5439,12 +5439,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -5453,12 +5453,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -5467,12 +5467,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -5481,12 +5481,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -5495,12 +5495,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -5509,12 +5509,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -5523,12 +5523,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -5537,12 +5537,12 @@ // CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -5551,12 +5551,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -5565,12 +5565,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -5579,12 +5579,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -5593,12 +5593,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -5607,12 +5607,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -5621,12 +5621,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -5635,12 +5635,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -5649,12 +5649,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { @@ -5663,12 +5663,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, 
vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -5677,12 +5677,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { @@ -5691,12 +5691,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -5705,12 +5705,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { @@ -5719,12 +5719,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -5733,12 +5733,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { @@ -5747,12 +5747,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -5761,12 +5761,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -5775,12 +5775,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -5789,12 +5789,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -5803,12 +5803,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -5817,12 +5817,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { @@ -5831,12 +5831,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -5845,12 +5845,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { @@ -5859,12 +5859,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -5873,12 +5873,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { @@ -5887,12 +5887,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -5901,12 +5901,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { @@ -5915,12 +5915,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -5929,12 +5929,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -5943,12 +5943,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -5957,12 +5957,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { @@ -5971,12 +5971,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -5985,12 +5985,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { @@ -5999,12 +5999,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -6013,12 +6013,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { @@ -6027,12 +6027,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -6041,12 +6041,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { @@ -6055,12 +6055,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -6069,12 +6069,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { @@ -6083,12 +6083,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -6097,12 +6097,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { @@ -6111,12 +6111,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -6125,12 +6125,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { @@ -6139,12 +6139,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -6153,12 +6153,12 @@ // CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vmv_v_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vmv_v_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vmv_v_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) { @@ -51,60 +51,60 @@ // CHECK-RV32-LABEL: @test_vmv_v_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { return vmv_v(src, vl); } // CHECK-RV32-LABEL: @test_vmv_v_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { return vmv_v(src, vl); } // CHECK-RV32-LABEL: @test_vmv_v_v_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { return vmv_v(src, vl); } // CHECK-RV32-LABEL: @test_vmv_v_v_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { return vmv_v(src, vl); } // CHECK-RV32-LABEL: @test_vmv_v_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4( // 
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
@@ -113,12 +113,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
@@ -127,12 +127,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
@@ -141,12 +141,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
@@ -155,12 +155,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
@@ -169,12 +169,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
@@ -183,12 +183,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
@@ -197,12 +197,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
@@ -211,12 +211,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
@@ -225,12 +225,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
@@ -239,12 +239,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
@@ -253,12 +253,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
@@ -267,12 +267,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
@@ -281,12 +281,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
@@ -295,12 +295,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
@@ -309,12 +309,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
@@ -323,12 +323,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
@@ -337,12 +337,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
@@ -351,12 +351,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
@@ -365,12 +365,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
@@ -379,12 +379,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
@@ -393,12 +393,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
@@ -407,12 +407,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
@@ -421,12 +421,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
@@ -435,12 +435,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
@@ -449,12 +449,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
@@ -463,12 +463,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
@@ -477,12 +477,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
@@ -491,12 +491,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
@@ -505,12 +505,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
@@ -519,12 +519,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
@@ -533,12 +533,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
@@ -547,12 +547,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
@@ -561,12 +561,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
@@ -575,12 +575,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
@@ -589,12 +589,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
@@ -603,12 +603,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
@@ -617,12 +617,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
@@ -631,12 +631,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
@@ -645,12 +645,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
@@ -659,12 +659,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
@@ -673,12 +673,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
@@ -687,12 +687,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
@@ -701,12 +701,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
@@ -715,12 +715,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
@@ -729,12 +729,12 @@
// CHECK-RV32-LABEL: @test_vmv_v_v_f64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i32( [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
@@ -743,24 +743,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8mf8_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf8_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
@@ -769,24 +769,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8mf4_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
@@ -795,24 +795,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8mf2_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
@@ -821,24 +821,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8m1_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
@@ -847,24 +847,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8m2_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
@@ -873,24 +873,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8m4_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
@@ -899,24 +899,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i8m8_i8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i8 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
@@ -925,24 +925,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i16mf4_i16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i16 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
@@ -951,24 +951,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i16mf2_i16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i16 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
@@ -977,24 +977,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i16m1_i16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i16 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
@@ -1003,24 +1003,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i16m2_i16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i16 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
@@ -1029,24 +1029,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i16m4_i16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i16 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
@@ -1055,24 +1055,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i16m8_i16(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i16 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
@@ -1081,24 +1081,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i32mf2_i32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i32 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
@@ -1107,24 +1107,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i32m1_i32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i32 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
@@ -1133,24 +1133,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i32m2_i32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i32 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
@@ -1159,24 +1159,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i32m4_i32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i32 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
@@ -1185,24 +1185,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i32m8_i32(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i32 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
@@ -1211,24 +1211,24 @@
// CHECK-RV32-LABEL: @test_vmv_x_s_i64m1_i64(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]])
// CHECK-RV32-NEXT: ret i64 [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]])
// CHECK-RV64-NEXT: ret i64 [[TMP0]]
//
int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { return vmv_x(src); }
// CHECK-RV32-LABEL: @test_vmv_s_x_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]])
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) { @@ -1237,24 +1237,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_i64m2_i64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) { @@ -1263,24 +1263,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_i64m4_i64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) { @@ -1289,24 +1289,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_i64m8_i64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 
@llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) { @@ -1315,24 +1315,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8mf8_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) { @@ -1341,24 +1341,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8mf4_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) { @@ -1367,24 +1367,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8mf2_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) { @@ -1393,24 +1393,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8m1_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, 
uint8_t src, size_t vl) { @@ -1419,24 +1419,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8m2_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) { @@ -1445,24 +1445,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8m4_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) { @@ -1471,24 +1471,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u8m8_u8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i8 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // uint8_t 
test_vmv_x_s_u8m8_u8(vuint8m8_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i32( [[DST:%.*]], i8 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) { @@ -1497,24 +1497,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u16mf4_u16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i16 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) { @@ -1523,24 +1523,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u16mf2_u16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i16 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) { @@ -1549,24 +1549,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u16m1_u16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i16 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) { @@ -1575,24 +1575,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u16m2_u16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i16 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) { @@ -1601,24 +1601,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u16m4_u16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i16 [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vmv_x_s_u16m4_u16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) { @@ -1627,24 +1627,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u16m8_u16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i16 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i32( [[DST:%.*]], i16 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) { @@ -1653,24 +1653,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u32mf2_u32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmv.s.x.nxv1i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) { @@ -1679,24 +1679,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u32m1_u32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) { @@ -1705,24 +1705,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u32m2_u32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) { @@ -1731,24 +1731,24 @@ // CHECK-RV32-LABEL: 
@test_vmv_x_s_u32m4_u32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) { @@ -1757,24 +1757,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u32m8_u32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i32( [[DST:%.*]], i32 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) { @@ -1783,24 +1783,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u64m1_u64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t 
test_vmv_x_s_u64m1_u64(vuint64m1_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) { @@ -1809,24 +1809,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u64m2_u64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) { @@ -1835,24 +1835,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u64m4_u64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) { @@ -1861,24 +1861,24 @@ // CHECK-RV32-LABEL: @test_vmv_x_s_u64m8_u64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV32-NEXT: ret i64 [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { return vmv_x(src); } // CHECK-RV32-LABEL: @test_vmv_s_x_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i32( [[DST:%.*]], i64 [[SRC:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { 
@@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmxnor_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxnor_mm_b64( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxnor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv64i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv64i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv32i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv32i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv16i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv16i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv8i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv8i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv8i1.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv4i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv4i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv2i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b32( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv2i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vmxor_mm_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv1i1.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vmxor_mm_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmxor.nxv1i1.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, 
vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, @@ -190,12 +190,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vnclip_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, @@ -332,12 +332,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -346,12 +346,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -360,12 +360,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -374,12 +374,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, @@ -445,12 +445,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64( 
[[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -488,12 +488,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, @@ -503,12 +503,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -531,12 +531,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -545,12 +545,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -559,12 +559,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -573,12 +573,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -587,12 +587,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t 
vl) { @@ -601,12 +601,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, @@ -616,12 +616,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -630,12 +630,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, @@ -645,12 +645,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -659,12 +659,12 @@ // CHECK-RV32-LABEL: 
@test_vnclipu_wv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, @@ -674,12 +674,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -688,12 +688,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, @@ -703,12 +703,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -717,12 +717,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, @@ -732,12 +732,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -746,12 +746,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, @@ -761,12 +761,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -775,12 +775,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, @@ -790,12 +790,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -804,12 +804,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, @@ -848,12 +848,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { @@ -862,12 +862,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -878,12 +878,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t 
test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -893,12 +893,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -909,12 +909,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -924,12 +924,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -940,12 +940,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -970,12 +970,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -985,12 +985,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1000,12 +1000,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1030,12 +1030,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1045,12 +1045,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1061,12 +1061,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1076,12 +1076,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1092,12 +1092,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1107,12 +1107,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1123,12 +1123,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1138,12 +1138,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1154,12 +1154,12 @@ // CHECK-RV32-LABEL: 
@test_vnclip_wx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1185,12 +1185,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1216,12 +1216,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1231,12 +1231,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1247,12 +1247,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1262,12 +1262,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1278,12 +1278,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1293,12 +1293,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vnclip_wx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1324,12 +1324,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1340,12 +1340,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1355,12 +1355,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1371,12 +1371,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1386,12 +1386,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1402,12 +1402,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1417,12 +1417,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1433,12 +1433,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1448,12 +1448,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1464,12 +1464,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1479,12 +1479,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1495,12 +1495,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1510,12 +1510,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1526,12 +1526,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1542,12 +1542,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1558,12 +1558,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1574,12 +1574,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1590,12 +1590,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1605,12 +1605,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1621,12 +1621,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1636,12 +1636,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1652,12 +1652,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1667,12 +1667,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1683,12 +1683,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1699,12 +1699,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1730,12 +1730,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1746,12 +1746,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1761,12 +1761,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1777,12 +1777,12 @@ // CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1 (vint8m1_t op1, 
size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vneg_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vneg_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vneg_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) 
#[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vneg_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vneg_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vneg_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vneg_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 0, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vneg_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 0, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vneg_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vneg_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( 
[[OP1:%.*]], i32 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2 (vint32mf2_t op1, size_t vl) {
@@ -205,12 +205,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1 (vint32m1_t op1, size_t vl) {
@@ -219,12 +219,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2 (vint32m2_t op1, size_t vl) {
@@ -233,12 +233,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4 (vint32m4_t op1, size_t vl) {
@@ -247,12 +247,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8 (vint32m8_t op1, size_t vl) {
@@ -261,12 +261,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1 (vint64m1_t op1, size_t vl) {
@@ -275,12 +275,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2 (vint64m2_t op1, size_t vl) {
@@ -289,12 +289,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4 (vint64m4_t op1, size_t vl) {
@@ -303,12 +303,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 0, i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) {
@@ -317,12 +317,12 @@
// CHECK-RV32-LABEL: @test_vneg_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -77,12 +77,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -91,12 +91,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -105,12 +105,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -119,12 +119,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -133,12 +133,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -147,12 +147,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -161,12 +161,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -175,12 +175,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -203,12 +203,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -217,12 +217,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -231,12 +231,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -245,12 +245,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -259,12 +259,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -273,12 +273,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -287,12 +287,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -301,12 +301,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -315,12 +315,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -329,12 +329,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -343,12 +343,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -357,12 +357,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -371,12 +371,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -385,12 +385,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -399,12 +399,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -413,12 +413,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -427,12 +427,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -441,12 +441,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -455,12 +455,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -469,12 +469,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -483,12 +483,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -497,12 +497,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -511,12 +511,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -525,12 +525,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -539,12 +539,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -553,12 +553,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -567,12 +567,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -581,12 +581,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -595,12 +595,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -609,12 +609,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -623,12 +623,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -637,12 +637,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -651,12 +651,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -665,12 +665,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -679,12 +679,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -693,12 +693,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -707,12 +707,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -721,12 +721,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -735,12 +735,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -749,12 +749,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -763,12 +763,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -777,12 +777,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -791,12 +791,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -805,12 +805,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -819,12 +819,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -833,12 +833,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -847,12 +847,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -861,12 +861,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -875,12 +875,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -889,12 +889,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -903,12 +903,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -917,12 +917,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -931,12 +931,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -945,12 +945,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -959,12 +959,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -973,12 +973,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -987,12 +987,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1001,12 +1001,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1015,12 +1015,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1029,12 +1029,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1043,12 +1043,12 @@
// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t
test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, 
vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vnmsac_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // 
CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // 
CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 
+1617,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1715,12 
+1715,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 
+1813,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: 
@test_vnmsac_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: 
@test_vnmsac_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -2303,12 
+2303,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vnmsac_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t 
op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
@@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t
acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, 
size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -147,12 +147,12 @@ // 
CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: 
@test_vnmsub_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ 
-259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, 
size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t 
test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i8.i8.i64( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i16.i16.i64( [[ACC:%.*]], i16 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( 
[[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vnmsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, 
vuint8m2_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t 
op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, 
vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32( 
[[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64( [[ACC:%.*]], i64 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8 (vint8mf8_t op1, size_t vl) { @@ -23,12 +23,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnot_v_i8mf4 (vint8mf4_t op1, size_t vl) { @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnot_v_i8mf2 (vint8mf2_t op1, size_t vl) { @@ -51,12 +51,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnot_v_i8m1 (vint8m1_t op1, size_t vl) { @@ -65,12 +65,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnot_v_i8m2 (vint8m2_t op1, size_t vl) { @@ -79,12 +79,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnot_v_i8m4 (vint8m4_t op1, size_t vl) { @@ -93,12 +93,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vnot_v_i8m8 (vint8m8_t op1, size_t vl) { @@ -107,12 +107,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnot_v_i16mf4 (vint16mf4_t op1, size_t vl) { @@ -121,12 +121,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnot_v_i16mf2 (vint16mf2_t op1, size_t vl) { @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnot_v_i16m1 (vint16m1_t op1, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( 
[[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnot_v_i16m2 (vint16m2_t op1, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnot_v_i16m4 (vint16m4_t op1, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vnot_v_i16m8 (vint16m8_t op1, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnot_v_i32mf2 (vint32mf2_t op1, size_t vl) { @@ -205,12 +205,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnot_v_i32m1 (vint32m1_t op1, size_t vl) { @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnot_v_i32m2 (vint32m2_t op1, size_t vl) { @@ -233,12 +233,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnot_v_i32m4 (vint32m4_t op1, size_t vl) { @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vnot_v_i32m8 (vint32m8_t op1, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vnot_v_i64m1 (vint64m1_t op1, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i64m2( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vnot_v_i64m2 (vint64m2_t op1, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vnot_v_i64m4 (vint64m4_t op1, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vnot_v_i64m8 (vint64m8_t op1, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnot_v_u8mf8 (vuint8mf8_t op1, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnot_v_u8mf4 (vuint8mf4_t op1, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) 
#[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnot_v_u8mf2 (vuint8mf2_t op1, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnot_v_u8m1 (vuint8m1_t op1, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnot_v_u8m2 (vuint8m2_t op1, size_t vl) { @@ -387,12 +387,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnot_v_u8m4 (vuint8m4_t op1, size_t vl) { @@ -401,12 +401,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vnot_v_u8m8 (vuint8m8_t op1, size_t vl) { @@ -415,12 
+415,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnot_v_u16mf4 (vuint16mf4_t op1, size_t vl) { @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnot_v_u16mf2 (vuint16mf2_t op1, size_t vl) { @@ -443,12 +443,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnot_v_u16m1 (vuint16m1_t op1, size_t vl) { @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnot_v_u16m2 (vuint16m2_t op1, size_t vl) { @@ -471,12 +471,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( 
[[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnot_v_u16m4 (vuint16m4_t op1, size_t vl) { @@ -485,12 +485,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vnot_v_u16m8 (vuint16m8_t op1, size_t vl) { @@ -499,12 +499,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnot_v_u32mf2 (vuint32mf2_t op1, size_t vl) { @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnot_v_u32m1 (vuint32m1_t op1, size_t vl) { @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnot_v_u32m2 (vuint32m2_t op1, size_t vl) { @@ -541,12 +541,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnot_v_u32m4 (vuint32m4_t op1, size_t vl) { @@ -555,12 +555,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vnot_v_u32m8 (vuint32m8_t op1, size_t vl) { @@ -569,12 +569,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vnot_v_u64m1 (vuint64m1_t op1, size_t vl) { @@ -583,12 +583,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vnot_v_u64m2 (vuint64m2_t op1, size_t vl) { @@ -597,12 +597,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vnot_v_u64m4 
(vuint64m4_t op1, size_t vl) { @@ -611,12 +611,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 -1, i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 -1, i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) { @@ -625,12 +625,12 @@ // CHECK-RV32-LABEL: @test_vnot_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnot_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8m4( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
@@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vnsra_wx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) { @@ 
-63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i32( 
[[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vnsrl_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { @@ 
-735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vnsrl_wx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ 
-287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 
[[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // 
CHECK-RV32-LABEL: @test_vor_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i32( [[OP1:%.*]], i8 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i16.i16.i64( [[OP1:%.*]], i16 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, 
size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i32( [[OP1:%.*]], 
i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // 
CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: 
@test_vor_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: 
@test_vor_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, 
vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u16m8_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, 
vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vor_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vor_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vor_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv64i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) 
[[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv32i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv16i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv8i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b16( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv4i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b16( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b32( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv2i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b32( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b64( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv1i1.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv64i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv64i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv32i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv32i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv16i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv16i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long 
test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv8i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv8i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b16_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv4i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv4i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b32_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv2i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv2i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vpopc_m_b64_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv1i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv1i1.i32( [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret i32 [[TMP0]] // // CHECK-RV64-LABEL: @test_vpopc_m_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64( [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret i64 [[TMP0]] // unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredand_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, @@ -352,12 +352,12 @@ // 
CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t 
test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv2i16.i64( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredand_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredand.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -683,12 +683,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, 
vint8m1_t dst, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -715,12 +715,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -747,12 +747,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -795,12 +795,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -811,12 +811,12 @@ // 
CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -827,12 +827,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -843,12 +843,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredand_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, 
vint32m1_t dst, @@ -923,12 +923,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -939,12 +939,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -971,12 +971,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -1003,12 +1003,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -1019,12 +1019,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t 
mask, vuint8m1_t dst, @@ -1035,12 +1035,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, @@ -1051,12 +1051,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, @@ -1067,12 +1067,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -1083,12 +1083,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredand_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -1115,12 +1115,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -1131,12 +1131,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ 
-1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -1163,12 +1163,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -1179,12 +1179,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -1195,12 +1195,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1259,12 +1259,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -1275,12 +1275,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -1291,12 +1291,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -1307,12 +1307,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1355,12 +1355,12 @@ // CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL:
@test_vredmax_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, @@ -292,12 +292,12 @@ // 
CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1( 
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -683,12 +683,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -715,12 +715,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -747,12 +747,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -795,12 +795,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -811,12 +811,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -827,12 +827,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -843,12 +843,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, @@ -923,12 +923,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -939,12 +939,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -971,12 +971,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -1003,12 +1003,12 @@ // CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -1019,12 +1019,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, @@ -1035,12 +1035,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, @@ -1051,12 +1051,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, @@ -1067,12 +1067,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -1083,12 +1083,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -1115,12 +1115,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -1131,12 +1131,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -1163,12 +1163,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -1179,12 +1179,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -1195,12 +1195,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1259,12 +1259,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -1275,12 +1275,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -1291,12 +1291,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -1307,12 +1307,12 @@ // 
CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1355,12 +1355,12 @@ // CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, @@ -52,12 +52,12 @@ //
CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t 
dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
@@ -172,12 +172,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
@@ -187,12 +187,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
@@ -202,12 +202,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
@@ -217,12 +217,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
@@ -232,12 +232,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
@@ -247,12 +247,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
@@ -262,12 +262,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
@@ -277,12 +277,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
@@ -292,12 +292,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
@@ -307,12 +307,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
@@ -322,12 +322,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
@@ -337,12 +337,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
@@ -352,12 +352,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
@@ -367,12 +367,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
@@ -382,12 +382,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
@@ -397,12 +397,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
@@ -412,12 +412,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
@@ -427,12 +427,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
@@ -442,12 +442,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
@@ -457,12 +457,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
@@ -472,12 +472,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
@@ -487,12 +487,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
@@ -502,12 +502,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
@@ -517,12 +517,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
@@ -532,12 +532,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
@@ -547,12 +547,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
@@ -562,12 +562,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
@@ -577,12 +577,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
@@ -592,12 +592,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
@@ -607,12 +607,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
@@ -622,12 +622,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
@@ -637,12 +637,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
@@ -652,12 +652,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
@@ -667,12 +667,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
@@ -683,12 +683,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
@@ -699,12 +699,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
@@ -715,12 +715,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
@@ -731,12 +731,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
@@ -747,12 +747,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
@@ -763,12 +763,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
@@ -779,12 +779,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
@@ -795,12 +795,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
@@ -811,12 +811,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -827,12 +827,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
@@ -843,12 +843,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
@@ -859,12 +859,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
@@ -875,12 +875,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
@@ -891,12 +891,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -907,12 +907,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
@@ -923,12 +923,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
@@ -939,12 +939,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
@@ -955,12 +955,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -971,12 +971,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -987,12 +987,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -1003,12 +1003,12 @@

 // CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -1019,12 +1019,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
@@ -1035,12 +1035,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
@@ -1051,12 +1051,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
@@ -1067,12 +1067,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -1083,12 +1083,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
@@ -1099,12 +1099,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
@@ -1115,12 +1115,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
@@ -1131,12 +1131,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
@@ -1147,12 +1147,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
@@ -1163,12 +1163,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -1179,12 +1179,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
@@ -1195,12 +1195,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
@@ -1211,12 +1211,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
@@ -1227,12 +1227,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
@@ -1243,12 +1243,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -1259,12 +1259,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
@@ -1275,12 +1275,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -1291,12 +1291,12 @@

 // CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:    ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -1307,12 +1307,12 @@ // CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1355,12 +1355,12 @@ // 
CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: 
@test_vredor_vs_u8m2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, 
vuint16mf4_t vector, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -683,12 +683,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( 
[[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -715,12 +715,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -747,12 +747,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -795,12 +795,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -811,12 +811,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -827,12 +827,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -843,12 +843,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t 
mask, vint16m1_t dst, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, @@ -923,12 +923,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -939,12 +939,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t 
dst, @@ -971,12 +971,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -1003,12 +1003,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -1019,12 +1019,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredor_vs_u8mf8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, @@ -1035,12 +1035,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, @@ -1051,12 +1051,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, @@ -1067,12 +1067,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -1083,12 +1083,12 @@ // CHECK-RV32-LABEL: 
@test_vredor_vs_u8m2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -1115,12 +1115,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -1131,12 +1131,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -1163,12 +1163,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -1179,12 +1179,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -1195,12 +1195,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1259,12 +1259,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -1275,12 +1275,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -1291,12 +1291,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -1307,12 +1307,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1355,12 +1355,12 @@ // CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t 
vector, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -683,12 +683,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -715,12 +715,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst, @@ -747,12 +747,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -795,12 +795,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -811,12 +811,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -827,12 +827,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -843,12 +843,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, @@ -923,12 +923,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -939,12 +939,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -971,12 +971,12 @@ // CHECK-RV32-LABEL: 
@test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -1003,12 +1003,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -1019,12 +1019,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst, @@ -1035,12 +1035,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst, @@ -1051,12 +1051,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst, @@ -1067,12 +1067,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -1083,12 +1083,12 @@ // CHECK-RV32-LABEL: 
@test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst, @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst, @@ -1115,12 +1115,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst, @@ -1131,12 +1131,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -1163,12 +1163,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -1179,12 +1179,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -1195,12 +1195,12 @@ // 
CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -1243,12 +1243,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredsum_vs_u32m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1259,12 +1259,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -1275,12 +1275,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -1291,12 +1291,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -1307,12 +1307,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1355,12 +1355,12 @@ // CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
@@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst,
vint8mf2_t vector, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vredxor.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector, @@ 
-517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst, @@ -683,12 +683,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst, @@ -715,12 +715,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, 
vint8m1_t dst, @@ -747,12 +747,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst, @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst, @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -795,12 +795,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vredxor_vs_i16mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -811,12 +811,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -827,12 +827,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -843,12 +843,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, 
vint16m1_t dst, @@ -859,12 +859,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst, @@ -923,12 +923,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -939,12 +939,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -971,12 +971,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
@@ -987,12 +987,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
@@ -1003,12 +1003,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
@@ -1019,12 +1019,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
#[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
@@ -1035,12 +1035,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
@@ -1051,12 +1051,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
@@ -1067,12 +1067,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t
test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -1083,12 +1083,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
@@ -1099,12 +1099,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
@@ -1115,12 +1115,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
@@ -1131,12 +1131,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT:
ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
@@ -1147,12 +1147,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
@@ -1163,12 +1163,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -1179,12 +1179,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
@@ -1195,12 +1195,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
@@ -1211,12 +1211,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
@@ -1227,12 +1227,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
@@ -1243,12 +1243,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]],
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -1259,12 +1259,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
@@ -1275,12 +1275,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
@@ -1291,12 +1291,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
@@ -1307,12 +1307,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -1323,12 +1323,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
@@ -1339,12 +1339,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
@@ -1355,12 +1355,12 @@
 // CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -7,12 +7,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -49,12 +49,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]],
i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -63,12 +63,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -77,12 +77,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -91,12 +91,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -105,12 +105,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
[[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -119,12 +119,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -133,12 +133,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -147,12 +147,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -161,12 +161,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -175,12 +175,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i8m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]]
= call @llvm.riscv.vrem.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -189,12 +189,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i8m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
@@ -203,12 +203,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -217,12 +217,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i16mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -231,12 +231,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -245,12 +245,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i16mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -259,12 +259,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -273,12 +273,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i16m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -287,12 +287,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i16m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t
test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -301,12 +301,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i16m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -315,12 +315,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i16m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -329,12 +329,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i16m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -343,12 +343,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i16m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -357,12 +357,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i16m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+//
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -371,12 +371,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -385,12 +385,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i32mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -399,12 +399,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -413,12 +413,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i32m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64(
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
@@ -427,12 +427,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -441,12 +441,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i32m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
@@ -455,12 +455,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -469,12 +469,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i32m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
@@ -483,12 +483,12 @@
 //
CHECK-RV32-LABEL: @test_vrem_vv_i32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -497,12 +497,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i32m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
@@ -511,12 +511,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -525,12 +525,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i64m1(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
@@ -539,12 +539,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]],
i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -553,12 +553,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i64m2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
@@ -567,12 +567,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -581,12 +581,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vx_i64m4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
@@ -595,12 +595,12 @@
 // CHECK-RV32-LABEL: @test_vrem_vv_i64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vrem.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m1( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i32( 
[[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv1i64.i64.i64( [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vremu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, 
size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, 
vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t 
op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t 
op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, 
vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) { @@ -38,12 +38,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index, @@ -53,12 +53,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) { @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) { @@ -96,12 +96,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -110,12 +110,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) { @@ -124,12 +124,12 @@ // 
CHECK-RV32-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -138,12 +138,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) { @@ -152,12 +152,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -166,12 +166,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) { @@ -180,12 +180,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vrgather.vv.nxv64i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -194,12 +194,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) { @@ -208,12 +208,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index, @@ -223,12 +223,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) { @@ -237,12 +237,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index, @@ -252,12 +252,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) { @@ -266,12 +266,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index, @@ -281,12 +281,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) { @@ -295,12 +295,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index, @@ -310,12 +310,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) { @@ -324,12 +324,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index, @@ -339,12 +339,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) { @@ -353,12 +353,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index, @@ -368,12 +368,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m8( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) { @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) { @@ -411,12 +411,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index, @@ -426,12 +426,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i32( [[OP1:%.*]], 
i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) { @@ -440,12 +440,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index, @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index, @@ -484,12 +484,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 
[[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) { @@ -498,12 +498,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index, @@ -513,12 +513,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) { @@ -527,12 +527,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index, @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t 
test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index, @@ -571,12 +571,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) { @@ -585,12 +585,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index, @@ -600,12 +600,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) { @@ -614,12 +614,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i32( 
[[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index, @@ -629,12 +629,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) { @@ -643,12 +643,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index, @@ -658,12 +658,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) { @@ -672,12 +672,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vrgather_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index, @@ -687,12 +687,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) { @@ -701,12 +701,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index, @@ -716,12 +716,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) { @@ -730,12 +730,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv8i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) { @@ -744,12 +744,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) { @@ -758,12 +758,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) { @@ -772,12 +772,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) { @@ -786,12 +786,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) { @@ -800,12 +800,12 @@ // 
CHECK-RV32-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) { @@ -814,12 +814,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv64i8.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) { @@ -828,12 +828,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv64i8.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) { @@ -842,12 +842,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index, @@ -857,12 +857,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index, @@ -872,12 +872,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index, @@ -887,12 +887,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index, @@ -902,12 +902,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index, @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index, @@ -946,12 +946,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) { @@ -960,12 +960,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index, @@ -975,12 +975,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i16.i64( [[OP1:%.*]], i64 
[[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) { @@ -989,12 +989,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32i16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index, @@ -1004,12 +1004,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32i16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) { @@ -1018,12 +1018,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index, @@ -1033,12 +1033,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index, @@ -1048,12 +1048,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index, @@ -1063,12 +1063,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) { @@ -1077,12 +1077,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index, @@ -1092,12 +1092,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) { @@ -1106,12 +1106,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vv.nxv8i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index, @@ -1121,12 +1121,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) { @@ -1135,12 +1135,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16i32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index, @@ -1150,12 +1150,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16i32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) { @@ -1164,12 +1164,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index, @@ -1179,12 +1179,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) { @@ -1193,12 +1193,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index, @@ -1208,12 +1208,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) { @@ -1222,12 +1222,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index, @@ -1237,12 +1237,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) { @@ -1251,12 +1251,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8i64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index, @@ -1266,12 +1266,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8i64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) { @@ -1280,12 +1280,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index, @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index, @@ -1310,12 +1310,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index, @@ -1325,12 +1325,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) { @@ -1339,12 +1339,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index, @@ -1354,12 +1354,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i32( 
[[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) { @@ -1368,12 +1368,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index, @@ -1383,12 +1383,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) { @@ -1397,12 +1397,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index, @@ -1412,12 +1412,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f32.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) { @@ -1426,12 +1426,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index, @@ -1441,12 +1441,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) { @@ -1455,12 +1455,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index, @@ -1470,12 +1470,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) { @@ -1484,12 +1484,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index, @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) { @@ -1513,12 +1513,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i32( [[OP1:%.*]], [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f64.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index, @@ -1528,12 +1528,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i32( [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f64.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) { @@ -1542,12 +1542,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf8( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2, @@ -1557,12 +1557,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2, @@ -1572,12 +1572,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2, @@ -1587,12 +1587,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) { @@ -1601,12 +1601,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) { @@ -1615,12 +1615,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) { @@ -1629,12 +1629,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, @@ -1644,12 +1644,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, @@ -1674,12 +1674,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, @@ -1689,12 +1689,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, @@ -1704,12 +1704,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, @@ -1719,12 +1719,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2, @@ -1734,12 +1734,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2, @@ -1749,12 +1749,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2, @@ -1764,12 +1764,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2, @@ -1779,12 +1779,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2, @@ 
-1794,12 +1794,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2, @@ -1809,12 +1809,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2, @@ -1824,12 +1824,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2, @@ -1839,12 +1839,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2, @@ -1854,12 +1854,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2, @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2, @@ -1884,12 +1884,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2, @@ -1899,12 +1899,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2, @@ -1914,12 +1914,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vrgatherei16_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2, @@ -1929,12 +1929,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2, @@ -1944,12 +1944,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1959,12 +1959,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1974,12 +1974,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -1989,12 +1989,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -2004,12 +2004,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -2019,12 +2019,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -2034,12 +2034,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -2049,12 +2049,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, @@ -2064,12 +2064,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2, @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2, @@ -2094,12 +2094,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2, @@ -2109,12 +2109,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2, @@ -2124,12 +2124,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2, @@ -2139,12 +2139,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2, @@ -2154,12 +2154,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2, @@ -2169,12 +2169,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2, @@ -2184,12 +2184,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2, @@ -2199,12 +2199,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2, @@ -2214,12 +2214,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2, @@ -2229,12 +2229,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2, @@ -2244,12 +2244,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2, @@ -2259,12 +2259,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2, @@ -2274,12 +2274,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, @@ -2304,12 +2304,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -2320,12 +2320,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -2335,12 +2335,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -2351,12 +2351,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -2366,12 +2366,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -2382,12 +2382,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -2397,12 +2397,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -2412,12 +2412,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -2427,12 +2427,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -2442,12 +2442,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -2472,12 +2472,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -2487,12 +2487,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -2502,12 +2502,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -2517,12 +2517,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -2533,12 +2533,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -2549,12 +2549,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -2565,12 +2565,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -2581,12 +2581,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -2597,12 +2597,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -2612,12 +2612,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -2628,12 +2628,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -2643,12 +2643,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -2659,12 +2659,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -2674,12 +2674,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2690,12 +2690,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2705,12 +2705,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2721,12 +2721,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2737,12 +2737,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -2753,12 +2753,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -2768,12 +2768,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -2784,12 +2784,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -2799,12 +2799,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -2815,12 +2815,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -2830,12 +2830,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t 
test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -2846,12 +2846,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -2861,12 +2861,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -2877,12 +2877,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -2892,12 +2892,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -2908,12 +2908,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -2923,12 +2923,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -2939,12 +2939,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -2954,12 +2954,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -2970,12 +2970,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -2985,12 +2985,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -3001,12 +3001,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -3016,12 +3016,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -3032,12 +3032,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -3047,12 +3047,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t 
maskedoff, @@ -3063,12 +3063,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -3078,12 +3078,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -3094,12 +3094,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -3109,12 +3109,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -3125,12 +3125,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -3140,12 +3140,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -3156,12 +3156,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -3171,12 +3171,12 @@ // CHECK-RV32-LABEL: 
@test_vrgather_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -3187,12 +3187,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -3202,12 +3202,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -3218,12 +3218,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -3234,12 +3234,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -3250,12 +3250,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -3266,12 +3266,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -3282,12 +3282,12 @@ // CHECK-RV32-LABEL: 
@test_vrgather_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -3297,12 +3297,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -3313,12 +3313,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -3328,12 +3328,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -3344,12 +3344,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -3359,12 +3359,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -3375,12 +3375,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -3390,12 +3390,12 @@ // CHECK-RV32-LABEL: 
@test_vrgather_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -3406,12 +3406,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -3422,12 +3422,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3438,12 +3438,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3453,12 +3453,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3469,12 +3469,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3484,12 +3484,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3500,12 +3500,12 @@ // CHECK-RV32-LABEL: 
@test_vrgather_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3515,12 +3515,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3531,12 +3531,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3546,12 +3546,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3562,12 +3562,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3577,12 +3577,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3593,12 +3593,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3608,12 +3608,12 @@ // CHECK-RV32-LABEL: 
@test_vrgather_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3624,12 +3624,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3639,12 +3639,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -3655,12 +3655,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -3670,12 +3670,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -3686,12 +3686,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -3702,12 +3702,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -3718,12 +3718,12 @@ // CHECK-RV32-LABEL: 
@test_vrgather_vx_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -3734,12 +3734,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -3750,12 +3750,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -3766,12 +3766,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -3782,12 +3782,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -3798,12 +3798,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -3814,12 +3814,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -3830,12 +3830,12 @@ // 
CHECK-RV32-LABEL: @test_vrgather_vv_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -3846,12 +3846,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -3862,12 +3862,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vv_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -3878,12 +3878,12 @@ // CHECK-RV32-LABEL: @test_vrgather_vx_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( // 
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
@@ -3894,12 +3894,12 @@
// CHECK-RV32-LABEL: @test_vrgather_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -3910,12 +3910,12 @@
// CHECK-RV32-LABEL: @test_vrgather_vx_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
@@ -3926,12 +3926,12 @@
// CHECK-RV32-LABEL: @test_vrgather_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -3942,12 +3942,12 @@
// CHECK-RV32-LABEL: @test_vrgather_vx_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[INDEX:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
@@ -3958,12 +3958,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
@@ -3974,12 +3974,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
@@ -3990,12 +3990,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
@@ -4006,12 +4006,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
@@ -4022,12 +4022,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
@@ -4038,12 +4038,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
@@ -4054,12 +4054,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
@@ -4070,12 +4070,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
@@ -4086,12 +4086,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
@@ -4102,12 +4102,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -4118,12 +4118,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
@@ -4134,12 +4134,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
@@ -4150,12 +4150,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -4166,12 +4166,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -4182,12 +4182,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -4198,12 +4198,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -4214,12 +4214,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -4230,12 +4230,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -4246,12 +4246,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -4262,12 +4262,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -4278,12 +4278,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -4294,12 +4294,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -4310,12 +4310,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -4326,12 +4326,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -4342,12 +4342,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -4358,12 +4358,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -4374,12 +4374,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -4390,12 +4390,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask,
@@ -4407,12 +4407,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask,
@@ -4424,12 +4424,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -4440,12 +4440,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -4456,12 +4456,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -4472,12 +4472,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -4488,12 +4488,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask,
@@ -4505,12 +4505,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -4521,12 +4521,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -4537,12 +4537,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -4553,12 +4553,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -4569,12 +4569,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -4585,12 +4585,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -4601,12 +4601,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -4617,12 +4617,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -4633,12 +4633,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask,
@@ -4650,12 +4650,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask,
@@ -4667,12 +4667,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask,
@@ -4684,12 +4684,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
@@ -4700,12 +4700,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
@@ -4716,12 +4716,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask,
@@ -4733,12 +4733,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask,
@@ -4750,12 +4750,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask,
@@ -4767,12 +4767,12 @@
// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2(
// CHECK-RV32-NEXT: entry:
@llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -371,12 
+371,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], 
i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: 
@test_vrsub_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, 
int8_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, 
vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: 
@test_vsadd_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i32( [[OP1:%.*]], i16 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // 
CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ 
-553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // 
CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -834,12 +834,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -848,12 +848,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -863,12 +863,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -905,12 +905,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, 
size_t vl) { @@ -919,12 +919,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -933,12 +933,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -947,12 +947,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -961,12 +961,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -975,12 +975,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -989,12 +989,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1004,12 +1004,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1018,12 +1018,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1032,12 +1032,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1046,12 +1046,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1060,12 +1060,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1074,12 +1074,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1088,12 +1088,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i32.i32.i64( 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1102,12 +1102,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1116,12 +1116,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1130,12 +1130,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1144,12 +1144,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1158,12 +1158,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2( 
// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1172,12 +1172,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1186,12 +1186,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1228,12 +1228,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1242,12 +1242,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1257,12 +1257,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1272,12 +1272,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1287,12 +1287,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1317,12 +1317,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1332,12 +1332,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1347,12 +1347,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1362,12 +1362,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1377,12 +1377,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1392,12 +1392,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1422,12 +1422,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1437,12 +1437,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i8m8_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1452,12 +1452,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1468,12 +1468,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1483,12 +1483,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1514,12 +1514,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1529,12 +1529,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1544,12 +1544,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1559,12 +1559,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1574,12 +1574,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1604,12 +1604,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1619,12 +1619,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1634,12 +1634,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1650,12 +1650,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1665,12 +1665,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1680,12 +1680,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1695,12 +1695,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
#[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1710,12 +1710,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1725,12 +1725,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1740,12 +1740,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1755,12 +1755,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1770,12 +1770,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1800,12 +1800,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1815,12 
+1815,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1830,12 +1830,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1845,12 +1845,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1860,12 +1860,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1875,12 +1875,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1890,12 +1890,12 @@ // CHECK-RV32-LABEL: @test_vsadd_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1905,12 +1905,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1921,12 +1921,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1936,12 +1936,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1952,12 +1952,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1983,12 +1983,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1998,12 +1998,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2013,12 +2013,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2028,12 +2028,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2043,12 +2043,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2058,12 +2058,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2073,12 +2073,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2088,12 +2088,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2103,12 +2103,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2118,12 +2118,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2134,12 +2134,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2150,12 +2150,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2166,12 +2166,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2182,12 +2182,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t 
mask, vuint16m1_t maskedoff, @@ -2198,12 +2198,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2213,12 +2213,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2229,12 +2229,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2244,12 +2244,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vsaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2260,12 +2260,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2291,12 +2291,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2306,12 +2306,12 @@ // 
CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2322,12 +2322,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2338,12 +2338,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2354,12 +2354,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2369,12 +2369,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2385,12 +2385,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2400,12 +2400,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2416,12 +2416,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2431,12 +2431,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2447,12 +2447,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2462,12 +2462,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2478,12 +2478,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2493,12 +2493,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2509,12 +2509,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2524,12 +2524,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2540,12 +2540,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2555,12 +2555,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -2571,12 +2571,12 @@ // CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vsbc_vxm_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( 
[[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], 
[[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, @@ -682,12 +682,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, @@ -697,12 +697,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, @@ -712,12 +712,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsbc.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, @@ -727,12 +727,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, @@ -742,12 +742,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, @@ -757,12 +757,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin, @@ -772,12 +772,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin, @@ -787,12 +787,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin, @@ -802,12 +802,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin, @@ -817,12 +817,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin, @@ -832,12 +832,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin, @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin, @@ -862,12 +862,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin, @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -892,12 +892,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -922,12 +922,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, @@ -937,12 +937,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, @@ -952,12 +952,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2, @@ -967,12 +967,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, @@ -997,12 +997,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, @@ -1012,12 +1012,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t 
test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, @@ -1027,12 +1027,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, @@ -1042,12 +1042,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1072,12 +1072,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, @@ -1087,12 +1087,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, @@ -1102,12 +1102,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2, @@ -1117,12 +1117,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, @@ -1132,12 +1132,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t 
op2, @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, @@ -1162,12 +1162,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, @@ -1177,12 +1177,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, @@ -1192,12 +1192,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, @@ -1207,12 +1207,12 @@ // 
CHECK-RV32-LABEL: @test_vsbc_vvm_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, @@ -1222,12 +1222,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2, @@ -1237,12 +1237,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, @@ -1252,12 +1252,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vxm_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsbc.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], [[BORROWIN:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2, @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vsbc_vvm_u64m4( // 
CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
@@ -1282,12 +1282,12 @@
 
 // CHECK-RV32-LABEL: @test_vsbc_vxm_u64m4(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
@@ -1297,12 +1297,12 @@
 
 // CHECK-RV32-LABEL: @test_vsbc_vvm_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
@@ -1312,12 +1312,12 @@
 
 // CHECK-RV32-LABEL: @test_vsbc_vxm_u64m8(
 // CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
@@ -10,13 +10,13 @@
 // CHECK-RV32-LABEL: @test_vse8_v_i8mf8(
 // CHECK-RV32-NEXT:  entry:
 // CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11:#.*]]
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11:[0-9]+]]
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_vse8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11:#.*]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11:[0-9]+]]
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
@@ -26,13 +26,13 @@
 // CHECK-RV32-LABEL: @test_vse8_v_i8mf4(
 // CHECK-RV32-NEXT:  entry:
 // CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_vse8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
@@ -42,13 +42,13 @@
 // CHECK-RV32-LABEL: @test_vse8_v_i8mf2(
 // CHECK-RV32-NEXT:  entry:
 // CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_vse8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
 // CHECK-RV64-NEXT:    ret void
 //
 void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
@@ -58,13 +58,13 @@
 // CHECK-RV32-LABEL: @test_vse8_v_i8m1(
 // CHECK-RV32-NEXT:  entry:
 // CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT:    call void @llvm.riscv.vse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
 // CHECK-RV32-NEXT:    ret void
 //
 // CHECK-RV64-LABEL: @test_vse8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* 
[[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { @@ -74,13 +74,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { @@ -90,13 +90,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { @@ -106,13 +106,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { @@ -122,13 +122,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // 
CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { @@ -138,13 +138,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { @@ -154,13 +154,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { @@ -170,13 +170,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { @@ -186,13 +186,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // 
CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { @@ -202,13 +202,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { @@ -218,13 +218,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { @@ -234,13 +234,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { @@ -250,13 +250,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // 
CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { @@ -266,13 +266,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { @@ -282,13 +282,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { @@ -298,13 +298,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { @@ -314,13 +314,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // 
CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { @@ -330,13 +330,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { @@ -346,13 +346,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { @@ -362,13 +362,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8mf8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { @@ -378,13 +378,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void 
test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { @@ -394,13 +394,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { @@ -410,13 +410,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { @@ -426,13 +426,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { @@ -442,13 +442,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, 
size_t vl) { @@ -458,13 +458,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { @@ -474,13 +474,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16mf4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { @@ -490,13 +490,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { @@ -506,13 +506,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { 
@@ -522,13 +522,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { @@ -538,13 +538,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { @@ -554,13 +554,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { @@ -570,13 +570,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) 
{ @@ -586,13 +586,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { @@ -602,13 +602,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { @@ -618,13 +618,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { @@ -634,13 +634,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { @@ 
-650,13 +650,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { @@ -666,13 +666,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { @@ -682,13 +682,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { @@ -698,13 +698,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { @@ -714,13 
+714,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32mf2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { @@ -730,13 +730,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { @@ -746,13 +746,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { @@ -762,13 +762,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { @@ 
-778,13 +778,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { @@ -794,13 +794,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_f64m1( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { @@ -810,13 +810,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_f64m2( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { @@ -826,13 +826,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_f64m4( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m4(double *base, vfloat64m4_t value, 
size_t vl) { @@ -842,13 +842,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_f64m8( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { @@ -858,13 +858,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { @@ -874,13 +874,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { @@ -890,13 +890,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { @@ -906,13 +906,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { @@ -922,13 +922,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { @@ -938,13 +938,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { @@ -954,13 +954,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_i8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32( [[VALUE:%.*]], * 
[[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { @@ -970,13 +970,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { @@ -986,13 +986,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { @@ -1002,13 +1002,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) 
{ @@ -1018,13 +1018,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { @@ -1034,13 +1034,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { @@ -1050,13 +1050,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_i16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { @@ -1066,13 +1066,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { @@ -1082,13 +1082,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { @@ -1098,13 +1098,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { @@ -1114,13 +1114,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { @@ -1130,13 +1130,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_i32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void 
@llvm.riscv.vse.mask.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { @@ -1146,13 +1146,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { @@ -1162,13 +1162,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { @@ -1178,13 +1178,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { @@ -1194,13 +1194,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_i64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { @@ -1210,13 +1210,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8mf8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { @@ -1226,13 +1226,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { @@ -1242,13 +1242,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], 
i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { @@ -1258,13 +1258,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { @@ -1274,13 +1274,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { @@ -1290,13 +1290,13 @@ // CHECK-RV32-LABEL: @test_vse8_v_u8m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { @@ -1306,13 +1306,13 @@ // CHECK-RV32-LABEL: 
@test_vse8_v_u8m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { @@ -1322,13 +1322,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16mf4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { @@ -1338,13 +1338,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { @@ -1354,13 +1354,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { @@ -1370,13 +1370,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { @@ -1386,13 +1386,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { @@ -1402,13 +1402,13 @@ // CHECK-RV32-LABEL: @test_vse16_v_u16m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { @@ -1418,13 +1418,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32( 
[[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { @@ -1434,13 +1434,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { @@ -1450,13 +1450,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { @@ -1466,13 +1466,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * 
[[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { @@ -1482,13 +1482,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_u32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) { @@ -1498,13 +1498,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) { @@ -1514,13 +1514,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) { @@ -1530,13 +1530,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) { @@ -1546,13 +1546,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_u64m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) { @@ -1562,13 +1562,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32mf2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) { @@ -1578,13 +1578,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) { @@ -1594,13 
+1594,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m2_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) { @@ -1610,13 +1610,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m4_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) { @@ -1626,13 +1626,13 @@ // CHECK-RV32-LABEL: @test_vse32_v_f32m8_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV64-NEXT: ret void // void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) { @@ -1642,13 +1642,13 @@ // CHECK-RV32-LABEL: @test_vse64_v_f64m1_m( // CHECK-RV32-NEXT: entry: // CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]] +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]] // CHECK-RV32-NEXT: ret void // // CHECK-RV64-LABEL: @test_vse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to 
*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
@@ -1658,13 +1658,13 @@
// CHECK-RV32-LABEL: @test_vse64_v_f64m2_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
@@ -1674,13 +1674,13 @@
// CHECK-RV32-LABEL: @test_vse64_v_f64m4_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse64_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
@@ -1690,13 +1690,13 @@
// CHECK-RV32-LABEL: @test_vse64_v_f64m8_m(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse64_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
@@ -1706,13 +1706,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv64i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv64i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv64i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv64i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
@@ -1722,13 +1722,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv32i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv32i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv32i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv32i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
@@ -1738,13 +1738,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv16i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv16i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv16i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv16i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
@@ -1754,13 +1754,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv8i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv8i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv8i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv8i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
@@ -1770,13 +1770,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b16(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv4i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv4i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b16(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv4i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv4i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
@@ -1786,13 +1786,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b32(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv2i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv2i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b32(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv2i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv2i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
@@ -1802,13 +1802,13 @@
// CHECK-RV32-LABEL: @test_vse1_v_b64(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv1i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv1i1.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV32-NEXT: ret void
//
// CHECK-RV64-LABEL: @test_vse1_v_b64(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv1i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv1i1.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) #[[ATTR11]]
// CHECK-RV64-NEXT: ret void
//
void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
@@ -35,12 +35,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
@@ -77,12 +77,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
@@ -91,12 +91,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
@@ -105,12 +105,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
@@ -119,12 +119,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
@@ -133,12 +133,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
@@ -147,12 +147,12 @@
// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.nxv1i32.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) { @@ -287,12 +287,12 @@ // 
CHECK-RV32-LABEL: @test_vsext_vf4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -414,12 +414,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -489,12 +489,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -504,12 +504,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -519,12 +519,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -549,12 +549,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -564,12 +564,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -579,12 +579,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -594,12 +594,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf8_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -624,12 +624,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -669,12 +669,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t 
maskedoff, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -714,12 +714,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -729,12 +729,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -744,12 +744,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf4_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -759,12 +759,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -774,12 +774,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -789,12 +789,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -804,12 +804,12 @@ // CHECK-RV32-LABEL: @test_vsext_vf2_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -120,12 +120,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -206,12 +206,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -220,12 +220,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 
[[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -248,12 +248,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -276,12 +276,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -290,12 +290,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -304,12 +304,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -318,12 +318,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value, @@ -333,12 +333,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value, @@ -348,12 +348,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value, @@ -363,12 +363,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -377,12 +377,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -391,12 +391,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -405,12 +405,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m8( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -419,12 +419,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -434,12 +434,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -449,12 +449,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -464,12 +464,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i32( 
[[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -479,12 +479,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -494,12 +494,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -509,12 +509,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -524,12 +524,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -554,12 +554,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -569,12 +569,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -599,12 +599,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -614,12 +614,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -629,12 +629,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, @@ -644,12 +644,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vslide1down_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -660,12 +660,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -676,12 +676,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -692,12 +692,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -722,12 +722,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -737,12 +737,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -752,12 +752,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 
[[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -768,12 +768,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -784,12 +784,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -800,12 +800,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -816,12 +816,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -832,12 +832,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -848,12 +848,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -864,12 +864,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1down.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -880,12 +880,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -896,12 +896,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -912,12 +912,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -928,12 +928,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -944,12 +944,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -960,12 +960,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -976,12 +976,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -992,12 +992,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1008,12 +1008,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1024,12 +1024,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1056,12 +1056,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1072,12 +1072,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1088,12 +1088,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1104,12 +1104,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, @@ -1121,12 +1121,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, @@ -1138,12 +1138,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1154,12 +1154,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1170,12 +1170,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1186,12 +1186,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1202,12 +1202,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, @@ -1219,12 +1219,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1235,12 +1235,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1251,12 +1251,12 @@ // CHECK-RV32-LABEL: 
@test_vslide1down_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1283,12 +1283,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1299,12 +1299,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1315,12 +1315,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1331,12 +1331,12 @@ // CHECK-RV32-LABEL: @test_vslide1down_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) 
[[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t 
test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value, @@ -120,12 +120,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value, @@ -135,12 +135,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) { @@ -149,12 +149,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) { @@ -163,12 +163,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) { @@ -177,12 +177,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) { @@ -191,12 +191,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value, @@ -206,12 +206,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) { @@ -220,12 +220,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) { @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) { @@ -248,12 +248,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) { @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) { @@ -276,12 +276,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) { @@ -290,12 +290,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) { @@ -304,12 +304,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) { @@ -318,12 +318,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) { @@ -332,12 +332,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) { @@ -346,12 +346,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) { @@ -360,12 +360,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i8.i8.i64( 
[[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) { @@ -374,12 +374,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) { @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i32( [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv64i8.i8.i64( [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value, @@ -431,12 +431,12 
@@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value, @@ -446,12 +446,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value, @@ -461,12 +461,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value, @@ -476,12 +476,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value, @@ -491,12 +491,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i32( [[SRC:%.*]], 
i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i32( [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv32i16.i16.i64( [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value, @@ -506,12 +506,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value, @@ -521,12 +521,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value, @@ -536,12 +536,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value, @@ -551,12 +551,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value, @@ -566,12 +566,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i32( [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv16i32.i32.i64( [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value, @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv1i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value, @@ -596,12 +596,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv2i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value, @@ -611,12 +611,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv4i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, @@ -626,12 +626,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i32( [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, @@ -641,12 +641,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -656,12 +656,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -671,12 +671,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -686,12 +686,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -701,12 +701,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -716,12 +716,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -746,12 +746,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -762,12 +762,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -778,12 +778,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -793,12 +793,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -808,12 +808,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -823,12 +823,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -838,12 +838,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -854,12 +854,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -869,12 +869,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -884,12 +884,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -899,12 +899,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -914,12 +914,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -929,12 +929,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -944,12 +944,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -974,12 +974,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -990,12 +990,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1006,12 +1006,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1022,12 +1022,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -1037,12 +1037,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -1052,12 +1052,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -1067,12 +1067,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -1082,12 +1082,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -1098,12 +1098,12 @@ // CHECK-RV32-LABEL: 
@test_vslide1up_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -1114,12 +1114,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -1130,12 +1130,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -1146,12 +1146,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -1162,12 +1162,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1178,12 +1178,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1194,12 +1194,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1210,12 +1210,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1226,12 +1226,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1242,12 +1242,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1258,12 +1258,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1274,12 +1274,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1290,12 +1290,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1306,12 +1306,12 @@ // CHECK-RV32-LABEL: @test_vslide1up_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 
[[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i32( [[DST:%.*]], 
[[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i32( 
[[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src, @@ -294,12 +294,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslidedown.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src, @@ -309,12 +309,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src, @@ -324,12 +324,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src, @@ -339,12 +339,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -354,12 +354,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -369,12 +369,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -384,12 +384,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, @@ -414,12 +414,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -489,12 +489,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -504,12 +504,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -519,12 +519,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -549,12 +549,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -564,12 +564,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -579,12 +579,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -594,12 +594,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -624,12 +624,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 
[[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -669,12 +669,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -714,12 +714,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i32( 
[[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -729,12 +729,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -744,12 +744,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -759,12 +759,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -774,12 +774,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -789,12 +789,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, @@ -804,12 +804,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -820,12 +820,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -836,12 +836,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -852,12 +852,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -867,12 +867,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -882,12 +882,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -897,12 +897,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -912,12 +912,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -928,12 +928,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -944,12 +944,12 @@ // 
CHECK-RV32-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -960,12 +960,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -976,12 +976,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -992,12 +992,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -1008,12 +1008,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -1024,12 +1024,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -1040,12 +1040,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -1056,12 +1056,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -1072,12 +1072,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -1088,12 +1088,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -1104,12 +1104,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -1120,12 +1120,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -1136,12 +1136,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -1152,12 +1152,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -1168,12 +1168,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -1184,12 +1184,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -1215,12 +1215,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -1230,12 +1230,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -1245,12 +1245,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -1260,12 +1260,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -1276,12 +1276,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -1292,12 +1292,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -1308,12 +1308,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -1324,12 +1324,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst, @@ 
-1340,12 +1340,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -1356,12 +1356,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -1372,12 +1372,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1388,12 +1388,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -1404,12 +1404,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -1420,12 +1420,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, @@ -1436,12 +1436,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1452,12 +1452,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -1468,12 +1468,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -1484,12 +1484,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -1500,12 +1500,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1516,12 +1516,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1532,12 +1532,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1548,12 +1548,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1564,12 +1564,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1580,12 +1580,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1596,12 +1596,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1612,12 +1612,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslidedown.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, @@ -1628,12 +1628,12 @@ // CHECK-RV32-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslidedown.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c @@ -9,12 +9,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset, @@ -24,12 +24,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset, @@ -39,12 +39,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vslideup.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset, @@ -54,12 +54,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset, @@ -69,12 +69,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset, @@ -84,12 +84,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset, @@ -99,12 +99,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset, @@ -114,12 +114,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src, @@ -129,12 +129,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src, @@ -144,12 +144,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset, @@ -159,12 +159,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset, @@ -174,12 +174,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset, @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset, @@ -204,12 +204,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src, @@ -219,12 +219,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset, @@ -234,12 +234,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset, @@ -249,12 +249,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset, @@ -264,12 +264,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset, @@ -279,12 +279,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslideup.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset, @@ -294,12 +294,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset, @@ -309,12 +309,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset, @@ -324,12 +324,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset, @@ -339,12 +339,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslideup.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src, @@ -354,12 +354,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src, @@ -369,12 +369,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src, @@ -384,12 +384,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset, @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i32( [[DST:%.*]], 
[[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset, @@ -414,12 +414,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset, @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset, @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src, @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i32( [[DST:%.*]], 
[[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src, @@ -489,12 +489,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src, @@ -504,12 +504,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src, @@ -519,12 +519,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], 
i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src, @@ -549,12 +549,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src, @@ -564,12 +564,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src, @@ -579,12 +579,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src, @@ -594,12 +594,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src, @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src, @@ -624,12 +624,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src, @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vslideup_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src, @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src, @@ -669,12 +669,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src, @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src, @@ -714,12 +714,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src, @@ -729,12 +729,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src, @@ -744,12 +744,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src, @@ -759,12 +759,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src, @@ -774,12 +774,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src, @@ -789,12 +789,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src, @@ -804,12 +804,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst, @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst, @@ -834,12 +834,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst, @@ -849,12 +849,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src, @@ -864,12 +864,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src, @@ -879,12 +879,12 @@ // CHECK-RV32-LABEL: 
@test_vslideup_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src, @@ -894,12 +894,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src, @@ -909,12 +909,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst, @@ -925,12 +925,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], 
[[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst, @@ -941,12 +941,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst, @@ -956,12 +956,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst, @@ -971,12 +971,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst, @@ -986,12 +986,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst, @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst, @@ -1017,12 +1017,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -1032,12 +1032,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] 
// CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst, @@ -1047,12 +1047,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst, @@ -1062,12 +1062,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst, @@ -1077,12 +1077,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -1092,12 +1092,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vslideup_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst, @@ -1107,12 +1107,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst, @@ -1122,12 +1122,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst, @@ -1137,12 +1137,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst, @@ -1153,12 +1153,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst, @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst, @@ -1185,12 +1185,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst, @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vslideup.mask.nxv16i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst, @@ -1215,12 +1215,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst, @@ -1230,12 +1230,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv64i8.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst, @@ -1245,12 +1245,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst, @@ -1261,12 +1261,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 
[[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst, @@ -1277,12 +1277,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -1293,12 +1293,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst, @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t 
dst, @@ -1325,12 +1325,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv32i16.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst, @@ -1341,12 +1341,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst, @@ -1357,12 +1357,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -1373,12 +1373,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst, @@ -1389,12 +1389,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst, @@ -1405,12 +1405,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16i32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst, @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1437,12 +1437,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 
[[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst, @@ -1453,12 +1453,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst, @@ -1469,12 +1469,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8i64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst, @@ -1485,12 +1485,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f32.i64( [[DST:%.*]], [[SRC:%.*]], 
i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst, @@ -1501,12 +1501,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst, @@ -1517,12 +1517,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst, @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst, @@ -1549,12 +1549,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv16f32.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst, @@ -1565,12 +1565,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv1f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst, @@ -1581,12 +1581,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv2f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst, @@ -1597,12 +1597,12 @@ // CHECK-RV32-LABEL: @test_vslideup_vx_f64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv4f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst, @@ -1613,12 +1613,12 @@ // 
CHECK-RV32-LABEL: @test_vslideup_vx_f64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i32( [[DST:%.*]], [[SRC:%.*]], i32 [[OFFSET:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslideup.mask.nxv8f64.i64( [[DST:%.*]], [[SRC:%.*]], i64 [[OFFSET:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -77,12 +77,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
@@ -91,12 +91,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -105,12 +105,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
@@ -119,12 +119,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -133,12 +133,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
@@ -147,12 +147,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -161,12 +161,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
@@ -175,12 +175,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
@@ -203,12 +203,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -217,12 +217,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
@@ -231,12 +231,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -245,12 +245,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
@@ -259,12 +259,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -273,12 +273,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
@@ -287,12 +287,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -301,12 +301,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
@@ -315,12 +315,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -329,12 +329,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
@@ -343,12 +343,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -357,12 +357,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
@@ -371,12 +371,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -385,12 +385,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
@@ -399,12 +399,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -413,12 +413,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
@@ -427,12 +427,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -441,12 +441,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
@@ -455,12 +455,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -469,12 +469,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
@@ -483,12 +483,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -497,12 +497,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
@@ -511,12 +511,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -525,12 +525,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
@@ -539,12 +539,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -553,12 +553,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
@@ -567,12 +567,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -581,12 +581,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
@@ -595,12 +595,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -609,12 +609,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
@@ -623,12 +623,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -637,12 +637,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -651,12 +651,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -665,12 +665,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -679,12 +679,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
@@ -693,12 +693,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
@@ -707,12 +707,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
@@ -721,12 +721,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
@@ -735,12 +735,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
@@ -749,12 +749,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
@@ -763,12 +763,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
@@ -777,12 +777,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
@@ -791,12 +791,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
@@ -805,12 +805,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
@@ -819,12 +819,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
@@ -833,12 +833,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
@@ -847,12 +847,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
@@ -861,12 +861,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
@@ -875,12 +875,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
@@ -889,12 +889,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
@@ -903,12 +903,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
@@ -917,12 +917,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
@@ -931,12 +931,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
@@ -945,12 +945,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
@@ -959,12 +959,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
@@ -973,12 +973,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
@@ -987,12 +987,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
@@ -1001,12 +1001,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
@@ -1015,12 +1015,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
@@ -1029,12 +1029,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
@@ -1043,12 +1043,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
@@ -1057,12 +1057,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u32m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
@@ -1071,12 +1071,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
@@ -1085,12 +1085,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u32m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
@@ -1099,12 +1099,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
@@ -1113,12 +1113,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u32m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
@@ -1127,12 +1127,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
@@ -1141,12 +1141,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u64m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
@@ -1155,12 +1155,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
@@ -1169,12 +1169,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u64m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
@@ -1183,12 +1183,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
@@ -1197,12 +1197,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u64m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
@@ -1211,12 +1211,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -1225,12 +1225,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_u64m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -1239,12 +1239,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -1253,12 +1253,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
@@ -1267,12 +1267,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -1281,12 +1281,12 @@
// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
@@ -1295,12 +1295,12 @@
// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
//
CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t 
test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t 
maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, 
vint64m4_t op1, size_t shift, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vsll_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i8.i8.i64( [[OP1:%.*]], i8 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // 
CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], 
i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, 
size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ 
-581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -638,12 +638,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -653,12 +653,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -668,12 +668,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -683,12 +683,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -698,12 +698,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -713,12 +713,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -728,12 +728,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -743,12 +743,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -758,12 +758,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -773,12 +773,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -788,12 +788,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, 
vint8m4_t maskedoff, @@ -803,12 +803,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -818,12 +818,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -849,12 +849,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -864,12 +864,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -880,12 +880,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -895,12 +895,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -910,12 +910,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -925,12 +925,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -940,12 +940,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -955,12 +955,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -970,12 +970,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -985,12 +985,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1000,12 +1000,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1031,12 +1031,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1046,12 +1046,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1061,12 +1061,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1076,12 +1076,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1091,12 +1091,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1106,12 +1106,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1121,12 +1121,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1136,12 +1136,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1151,12 +1151,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1166,12 +1166,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, 
@@ -1181,12 +1181,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1196,12 +1196,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1226,12 +1226,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1241,12 +1241,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1256,12 +1256,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1271,12 +1271,12 @@ // CHECK-RV32-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i32( [[OP1:%.*]],
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, 
vuint8m8_t shift, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i32.i32( 
[[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, 
vint16m1_t op1, size_t shift, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, 
vuint64m1_t shift, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vsrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: 
@test_vsrl_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t 
test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, 
size_t shift, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, 
vuint32m4_t shift, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t 
op1, vuint8mf4_t shift, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t 
test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) { @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], 
[[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vssra_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) { @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: 
@test_vssra_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) { @@ -486,12 +486,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) { @@ -514,12 +514,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) { @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) { @@ -570,12 +570,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) { @@ -598,12 +598,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { @@ -612,12 +612,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { @@ -626,12 +626,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -641,12 +641,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -656,12 +656,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -671,12 +671,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -686,12 +686,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -701,12 +701,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -716,12 +716,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -731,12 +731,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -746,12 +746,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -761,12 +761,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -776,12 +776,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -806,12 +806,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -821,12 +821,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -836,12 +836,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -852,12 +852,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -867,12 +867,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -883,12 +883,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -898,12 +898,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -913,12 +913,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -928,12 +928,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -943,12 +943,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -958,12 +958,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -988,12 +988,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1003,12 +1003,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1018,12 +1018,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1034,12 +1034,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1049,12 +1049,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1064,12 +1064,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ 
-1079,12 +1079,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1094,12 +1094,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1109,12 +1109,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1124,12 +1124,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1139,12 +1139,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1154,12 +1154,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1184,12 +1184,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1199,12 +1199,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1229,12 +1229,12 @@ // CHECK-RV32-LABEL: @test_vssra_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1244,12 +1244,12 @@
 // CHECK-RV32-LABEL: @test_vssra_vx_i64m4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -1259,12 +1259,12 @@
 // CHECK-RV32-LABEL: @test_vssra_vv_i64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -1274,12 +1274,12 @@
 // CHECK-RV32-LABEL: @test_vssra_vx_i64m8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -7,12 +7,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
@@ -21,12 +21,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
@@ -35,12 +35,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
@@ -49,12 +49,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
@@ -63,12 +63,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vssrl_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64( 
[[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv64i8.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, @@ -218,12 +218,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) { @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) { @@ -261,12 +261,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) { @@ -275,12 +275,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) { @@ -289,12 +289,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) { @@ -303,12 +303,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i32.i32( [[OP1:%.*]], i32 
[[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) { @@ -317,12 +317,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) { @@ -331,12 +331,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) { @@ -345,12 +345,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) { @@ -359,12 +359,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( 
[[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv32i16.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) { @@ -373,12 +373,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, @@ -388,12 +388,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) { @@ -402,12 +402,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) { @@ -416,12 +416,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) { @@ -430,12 +430,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) { @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) { @@ -458,12 +458,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) { @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) { @@ -486,12 +486,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32( 
[[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) { @@ -500,12 +500,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv16i32.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) { @@ -514,12 +514,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) { @@ -528,12 +528,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) { @@ -542,12 +542,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vssrl_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) { @@ -556,12 +556,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) { @@ -570,12 +570,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) { @@ -584,12 +584,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) { @@ -598,12 +598,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[SHIFT:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
@@ -612,12 +612,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
@@ -626,12 +626,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -642,12 +642,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -657,12 +657,12 @@
 // CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4_m(
 // CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
 // CHECK-RV32-NEXT: ret [[TMP0]]
 //
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -673,12 +673,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -688,12 +688,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -704,12 +704,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -719,12 +719,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m1_m( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -734,12 +734,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -764,12 +764,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -779,12 +779,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -794,12 +794,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -809,12 +809,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -824,12 +824,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -839,12 +839,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -855,12 +855,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -870,12 +870,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -886,12 +886,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -901,12 +901,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -932,12 +932,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -948,12 +948,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -963,12 +963,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -979,12 +979,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -994,12 +994,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1010,12 +1010,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -1025,12 +1025,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1041,12 +1041,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -1056,12 +1056,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1072,12 +1072,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -1087,12 +1087,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1103,12 +1103,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -1118,12 +1118,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1134,12 +1134,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -1149,12 +1149,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1165,12 +1165,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -1180,12 +1180,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1196,12 +1196,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1227,12 +1227,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -1242,12 +1242,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1258,12 +1258,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -1273,12 +1273,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -1289,12 +1289,12 @@ // CHECK-RV32-LABEL: @test_vssrl_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[SHIFT:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t
test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vssub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32mf2( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m4( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vssubu_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, @@ -834,12 +834,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -848,12 +848,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, @@ -863,12 +863,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -905,12 +905,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -919,12 +919,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -933,12 +933,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -947,12 +947,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -961,12 +961,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -975,12 +975,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -989,12 +989,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1004,12 +1004,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1018,12 +1018,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1032,12 +1032,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1046,12 +1046,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1060,12 +1060,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1074,12 +1074,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1088,12 +1088,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1102,12 +1102,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t 
test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1116,12 +1116,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1130,12 +1130,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1144,12 +1144,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1158,12 +1158,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1172,12 +1172,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1186,12 +1186,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1200,12 +1200,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1214,12 +1214,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1228,12 +1228,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1242,12 +1242,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1257,12 +1257,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, @@ -1272,12 +1272,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1287,12 +1287,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1317,12 +1317,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, @@ -1332,12 +1332,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1347,12 +1347,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, @@ -1362,12 +1362,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1377,12 +1377,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, @@ -1392,12 +1392,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, @@ -1422,12 +1422,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, @@ -1437,12 +1437,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t 
maskedoff, @@ -1452,12 +1452,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1468,12 +1468,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1483,12 +1483,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1514,12 +1514,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1529,12 +1529,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1544,12 +1544,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1559,12 +1559,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1574,12 +1574,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1604,12 +1604,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1619,12 +1619,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -1634,12 +1634,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1650,12 +1650,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -1665,12 +1665,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1680,12 +1680,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -1695,12 +1695,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1710,12 +1710,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -1725,12 +1725,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1740,12 +1740,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -1755,12 +1755,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1770,12 +1770,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1800,12 +1800,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -1815,12 +1815,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t 
test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1830,12 +1830,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -1845,12 +1845,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1860,12 +1860,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -1875,12 +1875,12 @@ // CHECK-RV32-LABEL: @test_vssub_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vssub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1890,12 +1890,12 @@ // CHECK-RV32-LABEL: @test_vssub_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -1905,12 +1905,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1921,12 +1921,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, @@ -1936,12 +1936,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1952,12 +1952,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1983,12 +1983,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, @@ -1998,12 +1998,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2013,12 +2013,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, @@ -2028,12 +2028,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2043,12 +2043,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, @@ -2058,12 +2058,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2073,12 +2073,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, @@ -2088,12 +2088,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2103,12 +2103,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, @@ -2118,12 +2118,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2134,12 +2134,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2150,12 +2150,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2166,12 +2166,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2182,12 +2182,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2198,12 +2198,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] 
// CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2213,12 +2213,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2229,12 +2229,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2244,12 +2244,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2260,12 +2260,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2291,12 +2291,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2306,12 +2306,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t 
test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2322,12 +2322,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2338,12 +2338,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2354,12 +2354,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -2369,12 +2369,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2385,12 +2385,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -2400,12 +2400,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2416,12 +2416,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -2431,12 +2431,12 @@ // 
CHECK-RV32-LABEL: @test_vssubu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2447,12 +2447,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -2462,12 +2462,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2478,12 +2478,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -2493,12 +2493,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2509,12 +2509,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -2524,12 +2524,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -2540,12 +2540,12 @@ // CHECK-RV32-LABEL: @test_vssubu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -2555,12 +2555,12 @@
// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -2571,12 +2571,12 @@
// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR1]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -35,12 +35,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -49,12 +49,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -63,12 +63,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -77,12 +77,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -91,12 +91,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -105,12 +105,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
@@ -119,12 +119,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -133,12 +133,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
@@ -147,12 +147,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -161,12 +161,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
@@ -175,12 +175,12 @@
// CHECK-RV32-LABEL: @test_vsub_vv_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -189,12 +189,12 @@
// CHECK-RV32-LABEL: @test_vsub_vx_i8m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m4( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( 
[[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // 
CHECK-RV32-LABEL: @test_vsub_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: 
@test_vsub_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m2( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vsub_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv2i64.i64.i64( [[OP1:%.*]], i64 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: 
@test_vsub_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t 
test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ 
-1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vsub_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, 
vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t 
vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // 
CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i32.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: 
@test_vwadd_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwadd_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8(vint32m4_t 
op1, vint32m4_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -862,12 +862,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i32( 
[[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -876,12 +876,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -905,12 +905,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -920,12 +920,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -934,12 +934,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -949,12 +949,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -963,12 +963,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -977,12 +977,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -991,12 +991,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -1005,12 +1005,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -1019,12 +1019,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1033,12 +1033,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t 
test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1047,12 +1047,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -1061,12 +1061,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -1075,12 +1075,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1089,12 +1089,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1103,12 +1103,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -1117,12 +1117,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -1131,12 +1131,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1145,12 +1145,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1159,12 +1159,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -1173,12 +1173,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -1187,12 +1187,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1202,12 +1202,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1216,12 +1216,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1231,12 +1231,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -1245,12 +1245,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1260,12 +1260,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1274,12 +1274,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -1288,12 +1288,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1316,12 +1316,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1330,12 +1330,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -1344,12 +1344,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -1358,12 +1358,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1372,12 +1372,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1386,12 +1386,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -1400,12 +1400,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -1414,12 +1414,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1428,12 +1428,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1442,12 +1442,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -1456,12 +1456,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -1470,12 +1470,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1485,12 +1485,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) //
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -1527,12 +1527,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1541,12 +1541,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1555,12 +1555,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1569,12 +1569,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1583,12 +1583,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1597,12 +1597,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1611,12 +1611,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1625,12 +1625,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1639,12 +1639,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1653,12 +1653,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,12 +1667,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1681,12 +1681,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { @@ -1695,12 +1695,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1710,12 +1710,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1725,12 +1725,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1740,12 +1740,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1755,12 +1755,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1770,12 +1770,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t 
test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1800,12 +1800,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1815,12 +1815,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1830,12 +1830,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1845,12 +1845,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]]
// // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1860,12 +1860,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1875,12 +1875,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1890,12 +1890,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
@@ -1905,12 +1905,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1920,12 +1920,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1935,12 +1935,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1950,12 +1950,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1965,12 +1965,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1980,12 +1980,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2010,12 +2010,12 @@ // 
CHECK-RV32-LABEL: @test_vwadd_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2025,12 +2025,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2040,12 +2040,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2055,12 +2055,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2071,12 +2071,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2086,12 +2086,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2102,12 +2102,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2117,12 +2117,12 @@ // 
CHECK-RV32-LABEL: @test_vwadd_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -2132,12 +2132,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -2147,12 +2147,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -2162,12 +2162,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -2192,12 +2192,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -2207,12 +2207,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -2222,12 +2222,12 @@ // 
CHECK-RV32-LABEL: @test_vwadd_wx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, @@ -2237,12 +2237,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -2252,12 +2252,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -2267,12 +2267,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -2282,12 +2282,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, @@ -2297,12 +2297,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -2312,12 +2312,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2327,12 +2327,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -2342,12 +2342,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, @@ -2357,12 +2357,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -2372,12 +2372,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL:
@test_vwadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -2402,12 +2402,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, @@ -2417,12 +2417,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -2432,12 
+2432,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -2447,12 +2447,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -2462,12 +2462,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, @@ -2477,12 +2477,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -2492,12 +2492,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -2507,12 +2507,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -2522,12 +2522,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, @@ -2537,12 +2537,12 @@ // 
CHECK-RV32-LABEL: @test_vwadd_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -2552,12 +2552,12 @@ // CHECK-RV32-LABEL: @test_vwadd_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -2567,12 +2567,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -2582,12 +2582,12 @@ // CHECK-RV32-LABEL: @test_vwadd_wx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, @@ -2597,12 +2597,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2613,12 +2613,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2628,12 +2628,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ 
-2644,12 +2644,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -2659,12 +2659,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2675,12 +2675,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2690,12 +2690,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2706,12 +2706,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -2721,12 +2721,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2737,12 +2737,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t 
mask, vuint16m1_t maskedoff, @@ -2752,12 +2752,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2768,12 +2768,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -2783,12 +2783,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2798,12 +2798,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2813,12 +2813,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2828,12 +2828,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -2843,12 +2843,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, 
vuint16m4_t maskedoff, @@ -2858,12 +2858,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2873,12 +2873,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2888,12 +2888,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -2903,12 +2903,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2918,12 +2918,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2933,12 +2933,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2948,12 +2948,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t 
test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -2963,12 +2963,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2979,12 +2979,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -2995,12 +2995,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -3011,12 +3011,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -3027,12 +3027,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3043,12 +3043,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3058,12 +3058,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3074,12 +3074,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -3089,12 +3089,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3105,12 +3105,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3120,12 +3120,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3136,12 +3136,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -3151,12 +3151,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3167,12 +3167,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3182,12 +3182,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3198,12 +3198,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -3213,12 +3213,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3229,12 +3229,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3244,12 +3244,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3260,12 +3260,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3275,12 +3275,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3291,12 +3291,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3306,12 +3306,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3322,12 +3322,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3337,12 +3337,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3353,12 +3353,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3368,12 +3368,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3384,12 +3384,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3399,12 +3399,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3415,12 +3415,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3430,12 +3430,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3446,12 +3446,12 @@ // CHECK-RV32-LABEL: 
@test_vwaddu_wx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3461,12 +3461,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -3477,12 +3477,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -3492,12 +3492,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwaddu_wv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -3508,12 +3508,12 @@ // CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1, @@ -22,12 +22,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, 
int8_t op1, vint8mf8_t op2, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i32( 
[[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1, @@ -562,12 +562,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2, @@ -577,12 +577,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1, @@ -592,12 +592,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2, @@ -607,12 +607,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1, @@ -622,12 +622,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2, @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1, @@ -652,12 +652,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1, @@ -667,12 +667,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1, @@ -682,12 +682,12 @@ // CHECK-RV32-LABEL: 
@test_vwmaccu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1, @@ -697,12 +697,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1, @@ -712,12 +712,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1, @@ -727,12 +727,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t 
acc, vuint16m2_t op1, @@ -742,12 +742,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1, @@ -757,12 +757,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1, @@ -772,12 +772,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1, @@ -787,12 +787,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1, @@ -802,12 +802,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1, @@ -817,12 +817,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1, @@ -832,12 +832,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1, @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1, @@ -862,12 +862,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1, @@ -877,12 +877,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1, @@ -892,12 +892,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1, @@ -907,12 +907,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1, @@ -922,12 +922,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1, @@ -937,12 +937,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1, @@ -952,12 +952,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1, @@ -967,12 +967,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, @@ -982,12 +982,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2, @@ -997,12 +997,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2, @@ -1012,12 +1012,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2, @@ -1027,12 +1027,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2, @@ -1042,12 +1042,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2, @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2, @@ -1072,12 +1072,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2, @@ -1087,12 +1087,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1, @@ -1102,12 +1102,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1, @@ -1117,12 +1117,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1, @@ -1132,12 +1132,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2, @@ -1147,12 +1147,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1, @@ -1162,12 +1162,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2, @@ -1177,12 +1177,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1, @@ -1192,12 +1192,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2, @@ -1207,12 
+1207,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1, @@ -1222,12 +1222,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2, @@ -1237,12 +1237,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1, @@ -1252,12 +1252,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2, @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1, @@ -1282,12 +1282,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2, @@ -1297,12 +1297,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1, @@ -1312,12 +1312,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2, @@ -1327,12 +1327,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1, @@ -1342,12 +1342,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2, @@ -1357,12 +1357,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1, @@ -1372,12 +1372,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1, @@ -1387,12 +1387,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2, @@ -1402,12 +1402,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2, @@ -1417,12 +1417,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2, @@ -1432,12 +1432,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2, @@ -1447,12 +1447,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1, @@ -1462,12 +1462,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2, @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2, @@ -1492,12 +1492,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2, @@ -1507,12 +1507,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2, @@ -1522,12 +1522,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2, @@ -1537,12 +1537,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2, @@ -1552,12 +1552,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2, @@ -1567,12 +1567,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2, @@ -1582,12 +1582,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -1597,12 +1597,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1, @@ -1612,12 +1612,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -1627,12 +1627,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1, @@ -1642,12 +1642,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -1657,12 +1657,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -1672,12 +1672,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1, @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -1702,12 +1702,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1, @@ -1717,12 +1717,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, @@ -1732,12 +1732,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1, @@ -1747,12 +1747,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, @@ -1762,12 +1762,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1778,12 +1778,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -1793,12 +1793,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -1808,12 +1808,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, @@ -1823,12 +1823,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -1838,12 +1838,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, @@ -1853,12 +1853,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1, @@ -1868,12 +1868,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1, @@ -1898,12 +1898,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, @@ -1913,12 +1913,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -1928,12 +1928,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, @@ -1943,12 +1943,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -1958,12 +1958,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, @@ -1973,12 +1973,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -1988,12 +1988,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -2003,12 +2003,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1, @@ -2018,12 +2018,12 @@ // CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t 
test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -2033,12 +2033,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -2049,12 +2049,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, @@ -2064,12 +2064,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -2080,12 +2080,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, @@ -2095,12 +2095,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -2111,12 +2111,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, @@ -2126,12 +2126,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, @@ -2141,12 
+2141,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1, @@ -2156,12 +2156,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, @@ -2171,12 +2171,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1, @@ -2186,12 +2186,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwmaccu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, @@ -2201,12 +2201,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1, @@ -2216,12 +2216,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, @@ -2232,12 +2232,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t 
acc, @@ -2248,12 +2248,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -2264,12 +2264,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, @@ -2279,12 +2279,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -2295,12 +2295,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwmaccu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, @@ -2310,12 +2310,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, @@ -2326,12 +2326,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, @@ -2341,12 +2341,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, @@ 
-2357,12 +2357,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, @@ -2372,12 +2372,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, @@ -2388,12 +2388,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, @@ -2403,12 +2403,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwmaccu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, @@ -2419,12 +2419,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, @@ -2434,12 +2434,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, @@ -2450,12 +2450,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, @@ -2465,12 
+2465,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, @@ -2481,12 +2481,12 @@ // CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, @@ -2496,12 +2496,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -2512,12 +2512,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -2527,12 +2527,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -2543,12 +2543,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -2558,12 +2558,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, @@ -2574,12 +2574,12 @@ // CHECK-RV32-LABEL: 
@test_vwmaccsu_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, @@ -2589,12 +2589,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, @@ -2604,12 +2604,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, @@ -2619,12 +2619,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, @@ -2634,12 +2634,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, @@ -2649,12 +2649,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, @@ -2664,12 +2664,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, @@ -2679,12 +2679,12 @@ // 
CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -2695,12 +2695,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -2711,12 +2711,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -2727,12 +2727,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwmaccsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, @@ -2742,12 +2742,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -2758,12 +2758,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, @@ -2773,12 +2773,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, @@ -2789,12 +2789,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, @@ -2804,12 +2804,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, @@ -2820,12 +2820,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, @@ -2835,12 +2835,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[ACC:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -2851,12 +2851,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, @@ -2866,12 +2866,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -2882,12 +2882,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, @@ -2897,12 +2897,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -2913,12 +2913,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, @@ -2928,12 +2928,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[ACC:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, @@ -2944,12 +2944,12 @@ // CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, @@ -2959,12 +2959,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, @@ -2974,12 +2974,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, @@ -2989,12 +2989,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64( 
[[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1, @@ -3004,12 +3004,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1, @@ -3019,12 +3019,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1, @@ -3034,12 +3034,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i32( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64( [[ACC:%.*]], i8 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1, @@ -3049,12 +3049,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, @@ -3065,12 +3065,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, @@ -3080,12 +3080,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, @@ -3095,12 +3095,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1, @@ -3110,12 +3110,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i32( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64( [[ACC:%.*]], i16 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1, @@ -3125,12 +3125,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, @@ -3140,12 +3140,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, @@ -3155,12 +3155,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, @@ -3170,12 +3170,12 @@ // CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i32( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64( [[ACC:%.*]], i32 [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, 
size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwmul_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( 
[[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -441,12 
+441,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, 
uint32_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, 
vint8m2_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] 
// vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // 
CHECK-RV32-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2079,12 +2079,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) 
#[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vwmulu_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t 
vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { @@ -2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { @@ -2471,12 +2471,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2485,12 +2485,12 @@ // CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t 
test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
@@ -2499,12 +2499,12 @@
// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -2513,12 +2513,12 @@
// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
@@ -22,12 +22,12 @@
// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
//
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector, @@ -37,12 +37,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector, @@ -52,12 +52,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector, @@ -67,12 +67,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t 
test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector, @@ -82,12 +82,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector, @@ -97,12 +97,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector, @@ -112,12 +112,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector, @@ -127,12 +127,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector, @@ -142,12 +142,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector, @@ -157,12 +157,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector, @@ -172,12 +172,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector, @@ -187,12 +187,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector, @@ -202,12 +202,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector, @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector, @@ -232,12 +232,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector, @@ -247,12 +247,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector, @@ -262,12 +262,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector, @@ -277,12 +277,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector, @@ -292,12 +292,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector, @@ -307,12 +307,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) 
[[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector, @@ -322,12 +322,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector, @@ -337,12 +337,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector, @@ -352,12 +352,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector, @@ -367,12 +367,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector, @@ -382,12 +382,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector, @@ -397,12 +397,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector, @@ -412,12 +412,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector, @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector, @@ -442,12 +442,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector, @@ -457,12 +457,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector, @@ -472,12 +472,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector, @@ -487,12 +487,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector, @@ -502,12 +502,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector, @@ -517,12 +517,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector, @@ -532,12 +532,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector, @@ -547,12 +547,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst, @@ -563,12 +563,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst, @@ -579,12 +579,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, 
vint16m1_t dst, @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst, @@ -611,12 +611,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst, @@ -627,12 +627,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst, @@ -643,12 +643,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] 
// // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst, @@ -659,12 +659,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst, @@ -675,12 +675,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst, @@ -691,12 +691,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst, @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst, @@ -723,12 +723,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst, @@ -739,12 +739,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst, @@ -755,12 +755,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst, @@ -771,12 +771,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst, @@ -787,12 +787,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst, @@ -803,12 +803,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst, @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst, @@ -835,12 +835,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst, @@ -851,12 +851,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst, @@ -867,12 +867,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst, @@ -883,12 +883,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst, @@ -899,12 +899,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst, @@ -915,12 +915,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst, @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst, @@ -947,12 +947,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst, @@ -963,12 +963,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst, @@ -979,12 +979,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst, @@ -995,12 +995,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst, @@ -1011,12 +1011,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst, @@ -1027,12 +1027,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst, @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst, @@ -1059,12 +1059,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst, @@ -1075,12 +1075,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst, @@ -1091,12 +1091,12 @@ 
// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst, @@ -1107,12 +1107,12 @@ // CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[DST:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR5]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst, diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // 
CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 
@@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: 
@test_vwsub_wx_i32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t 
op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 
[[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t 
test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, @@ -862,12 +862,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -876,12 +876,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, @@ -891,12 +891,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) { @@ -905,12 +905,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, @@ -920,12 +920,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -934,12 +934,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, @@ -949,12 +949,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) { @@ -963,12 +963,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -977,12 +977,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i32( [[OP1:%.*]], i8 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -991,12 +991,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { @@ -1005,12 +1005,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) { @@ -1019,12 +1019,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1033,12 +1033,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1047,12 +1047,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) { @@ -1061,12 +1061,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) { @@ -1075,12 +1075,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1089,12 +1089,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1103,12 +1103,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) { @@ -1117,12 +1117,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) { @@ -1131,12 +1131,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1145,12 +1145,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t 
test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1159,12 +1159,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) { @@ -1173,12 +1173,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv32i16.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) { @@ -1187,12 +1187,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, @@ -1202,12 +1202,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1216,12 +1216,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, @@ -1231,12 +1231,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) { @@ -1245,12 +1245,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, @@ -1260,12 +1260,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1274,12 +1274,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { @@ -1288,12 +1288,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) { @@ -1302,12 +1302,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -1316,12 +1316,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1330,12 +1330,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vwsubu_wv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) { @@ -1344,12 +1344,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) { @@ -1358,12 +1358,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1372,12 +1372,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1386,12 +1386,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], 
i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) { @@ -1400,12 +1400,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) { @@ -1414,12 +1414,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1428,12 +1428,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1442,12 +1442,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64( [[OP1:%.*]], 
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) { @@ -1456,12 +1456,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv16i32.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) { @@ -1470,12 +1470,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, @@ -1485,12 +1485,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1499,12 +1499,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { @@ -1513,12 +1513,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv1i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) { @@ -1527,12 +1527,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1541,12 +1541,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1555,12 +1555,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) { @@ -1569,12 +1569,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i32(
[[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv2i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) { @@ -1583,12 +1583,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1597,12 +1597,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1611,12 +1611,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) { @@ -1625,12 +1625,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i32( [[OP1:%.*]], i32 
[[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv4i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) { @@ -1639,12 +1639,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1653,12 +1653,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1667,12 +1667,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { @@ -1681,12 +1681,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { @@ -1695,12 +1695,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1710,12 +1710,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1725,12 +1725,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1740,12 +1740,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, @@ -1755,12 +1755,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1770,12 +1770,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1800,12 +1800,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, @@ -1815,12 +1815,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1830,12 +1830,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1845,12 +1845,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1860,12 +1860,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, @@ -1875,12 +1875,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1890,12 +1890,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1905,12 +1905,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1920,12 +1920,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, @@ -1935,12 +1935,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1950,12 +1950,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1965,12 +1965,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1980,12 +1980,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2010,12 +2010,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2025,12 +2025,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2040,12 +2040,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, @@ -2055,12 +2055,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2071,12 +2071,12 @@ // CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2086,12 +2086,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, @@ -2102,12 +2102,12 @@ // CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
@@ -2117,12 +2117,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -2132,12 +2132,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -2147,12 +2147,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -2162,12 +2162,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
@@ -2177,12 +2177,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -2192,12 +2192,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -2207,12 +2207,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -2222,12 +2222,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
@@ -2237,12 +2237,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -2252,12 +2252,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -2267,12 +2267,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -2282,12 +2282,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
@@ -2297,12 +2297,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2312,12 +2312,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2327,12 +2327,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2342,12 +2342,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
@@ -2357,12 +2357,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -2372,12 +2372,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -2387,12 +2387,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -2402,12 +2402,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
@@ -2417,12 +2417,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -2432,12 +2432,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -2447,12 +2447,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -2462,12 +2462,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
@@ -2477,12 +2477,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -2492,12 +2492,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -2507,12 +2507,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -2522,12 +2522,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
@@ -2537,12 +2537,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -2552,12 +2552,12 @@
// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -2567,12 +2567,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -2582,12 +2582,12 @@
// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
@@ -2597,12 +2597,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -2613,12 +2613,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -2628,12 +2628,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -2644,12 +2644,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -2659,12 +2659,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -2675,12 +2675,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -2690,12 +2690,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -2706,12 +2706,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -2721,12 +2721,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2737,12 +2737,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2752,12 +2752,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2768,12 +2768,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -2783,12 +2783,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2798,12 +2798,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2813,12 +2813,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2828,12 +2828,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -2843,12 +2843,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2858,12 +2858,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2873,12 +2873,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2888,12 +2888,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -2903,12 +2903,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2918,12 +2918,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2933,12 +2933,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2948,12 +2948,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -2963,12 +2963,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2979,12 +2979,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -2995,12 +2995,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -3011,12 +3011,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -3027,12 +3027,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -3043,12 +3043,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -3058,12 +3058,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -3074,12 +3074,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -3089,12 +3089,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -3105,12 +3105,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -3120,12 +3120,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -3136,12 +3136,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -3151,12 +3151,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -3167,12 +3167,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -3182,12 +3182,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -3198,12 +3198,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -3213,12 +3213,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -3229,12 +3229,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3244,12 +3244,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3260,12 +3260,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -3275,12 +3275,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret 
[[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3291,12 +3291,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3306,12 +3306,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3322,12 +3322,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t 
test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -3337,12 +3337,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3353,12 +3353,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3368,12 +3368,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3384,12 +3384,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -3399,12 +3399,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3415,12 +3415,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -3430,12 +3430,12 @@ // CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -3446,12 +3446,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -3461,12 +3461,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -3477,12 +3477,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -3492,12 +3492,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
@@ -3508,12 +3508,12 @@
// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -7,12 +7,12 @@
// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -21,12 +21,12 @@
// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2,
size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -189,12 +189,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( 
[[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
@@ -287,12 +287,12 @@
// CHECK-RV32-LABEL: @test_vxor_vv_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -301,12 +301,12 @@
// CHECK-RV32-LABEL: @test_vxor_vx_i16m2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
@@ -315,12 +315,12 @@
// CHECK-RV32-LABEL: @test_vxor_vv_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -329,12 +329,12 @@
// CHECK-RV32-LABEL: @test_vxor_vx_i16m4(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
@@ -343,12 +343,12 @@
// CHECK-RV32-LABEL: @test_vxor_vv_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -357,12 +357,12 @@
// CHECK-RV32-LABEL: @test_vxor_vx_i16m8(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
@@ -371,12 +371,12 @@
// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -385,12 +385,12 @@
// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret [[TMP0]]
//
// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
@@ -399,12 +399,12 @@
// CHECK-RV32-LABEL: @test_vxor_vv_i32m1(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -413,12 +413,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { @@ -427,12 +427,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -441,12 +441,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { @@ -455,12 +455,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -469,12 +469,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { @@ -483,12 +483,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -497,12 +497,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { @@ -511,12 +511,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -525,12 +525,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m1( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { @@ -539,12 +539,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -553,12 +553,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { @@ -567,12 +567,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -581,12 +581,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vxor_vx_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { @@ -595,12 +595,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { @@ -623,12 +623,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -637,12 +637,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -651,12 +651,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -665,12 +665,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -679,12 +679,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -693,12 +693,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -707,12 +707,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv8i8.nxv8i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -721,12 +721,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -735,12 +735,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -749,12 +749,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -763,12 +763,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vxor.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -777,12 +777,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -791,12 +791,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -805,12 +805,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i32( [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -819,12 +819,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -833,12 +833,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -847,12 +847,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -861,12 +861,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -875,12 +875,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -889,12 +889,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -903,12 +903,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -917,12 +917,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -931,12 +931,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -945,12 +945,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -959,12 +959,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -973,12 +973,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i32( [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -987,12 +987,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1001,12 +1001,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1015,12 +1015,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( 
[[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1029,12 +1029,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1043,12 +1043,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1057,12 +1057,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1071,12 +1071,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1085,12 +1085,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1099,12 +1099,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1113,12 +1113,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i32( [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1127,12 +1127,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1141,12 +1141,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1155,12 +1155,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1169,12 +1169,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1183,12 +1183,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1197,12 +1197,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) 
[[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1211,12 +1211,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i32( [[OP1:%.*]], [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1225,12 +1225,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i32( [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { @@ -1239,12 +1239,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { @@ -1253,12 +1253,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { @@ -1267,12 +1267,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { @@ -1281,12 +1281,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { @@ -1295,12 +1295,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { @@ -1309,12 +1309,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { @@ -1323,12 +1323,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { @@ -1337,12 +1337,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { @@ -1351,12 +1351,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { @@ -1365,12 +1365,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { @@ -1379,12 +1379,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { @@ -1393,12 +1393,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { @@ -1407,12 +1407,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { @@ -1421,12 +1421,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { @@ -1435,12 +1435,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { @@ -1449,12 +1449,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { @@ -1463,12 +1463,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { @@ -1477,12 +1477,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { @@ -1491,12 +1491,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { @@ -1505,12 +1505,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -1519,12 +1519,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -1533,12 +1533,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -1547,12 +1547,12 @@ // CHECK-RV32-LABEL: 
@test_vxor_vv_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1561,12 +1561,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1575,12 +1575,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1589,12 +1589,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: 
@test_vxor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1603,12 +1603,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1617,12 +1617,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1631,12 +1631,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t 
test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1645,12 +1645,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1659,12 +1659,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1673,12 +1673,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1687,12 +1687,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1701,12 +1701,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1715,12 +1715,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1729,12 +1729,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1743,12 +1743,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1757,12 +1757,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1771,12 +1771,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1785,12 +1785,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1799,12 +1799,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1813,12 +1813,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1827,12 +1827,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1841,12 +1841,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1855,12 +1855,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1869,12 +1869,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1883,12 +1883,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1897,12 +1897,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1911,12 +1911,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1925,12 +1925,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1939,12 +1939,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1953,12 +1953,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1967,12 +1967,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1981,12 +1981,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m( // CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1995,12 +1995,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -2009,12 +2009,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -2023,12 +2023,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -2037,12 +2037,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -2051,12 +2051,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -2065,12 +2065,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -2079,12 
+2079,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -2093,12 +2093,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -2107,12 +2107,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { @@ -2121,12 +2121,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -2135,12 +2135,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -2149,12 +2149,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -2163,12 +2163,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -2177,12 +2177,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -2191,12 +2191,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -2205,12 +2205,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -2219,12 +2219,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -2233,12 +2233,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -2247,12 +2247,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -2261,12 +2261,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -2275,12 +2275,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -2289,12 +2289,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -2303,12 +2303,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -2317,12 +2317,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -2331,12 +2331,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -2345,12 +2345,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -2359,12 +2359,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -2373,12 +2373,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -2387,12 +2387,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -2401,12 +2401,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ 
-2415,12 +2415,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -2429,12 +2429,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -2443,12 +2443,12 @@ // CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -2457,12 +2457,12 @@ // CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV32-NEXT: 
ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c @@ -7,12 +7,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i16.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) { @@ -21,12 +21,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i16.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) { @@ -35,12 +35,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i16.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) { @@ -49,12 +49,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i16.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) { @@ -63,12 +63,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i16.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) { @@ -77,12 +77,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv32i16.nxv32i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) { @@ -91,12 +91,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { @@ -105,12 +105,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { @@ -119,12 +119,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { @@ -133,12 +133,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { @@ -147,12 +147,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { @@ -161,12 +161,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { @@ -175,12 +175,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { @@ -189,12 +189,12 @@ // 
CHECK-RV32-LABEL: @test_vzext_vf8_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { @@ -203,12 +203,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { @@ -217,12 +217,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) { @@ -231,12 +231,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) { @@ -245,12 +245,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) { @@ -259,12 +259,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) { @@ -273,12 +273,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) { @@ -287,12 +287,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { @@ -301,12 +301,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { @@ -315,12 +315,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) 
// CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { @@ -329,12 +329,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m8( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { @@ -343,12 +343,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m1( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) { @@ -357,12 +357,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m2( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) { @@ -371,12 +371,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m4( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { @@ -385,12 +385,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m8( // CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { @@ -399,12 +399,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, @@ -414,12 +414,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, @@ -429,12 +429,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, @@ -444,12 +444,12 @@ // CHECK-RV32-LABEL: 
@test_vzext_vf2_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, @@ -459,12 +459,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, @@ -474,12 +474,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u16m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, @@ -489,12 +489,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -504,12 +504,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -519,12 +519,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -534,12 +534,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -549,12 +549,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -564,12 +564,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -579,12 +579,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -594,12 +594,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -609,12 +609,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf8_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -624,12 +624,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, @@ -639,12 +639,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, @@ -654,12 +654,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, @@ -669,12 +669,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m4_m( // 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, @@ -684,12 +684,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u32m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, @@ -699,12 +699,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -714,12 +714,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -729,12 +729,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -744,12 +744,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf4_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, @@ -759,12 +759,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, @@ -774,12 +774,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, @@ -789,12 +789,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, @@ -804,12 +804,12 @@ // CHECK-RV32-LABEL: @test_vzext_vf2_u64m8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV32-NEXT: ret [[TMP0]] // // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -164,6 +164,8 @@
   // C/C++ intrinsic operand order is different to builtin operand order. Record
   // the mapping of InputTypes index.
   SmallVector CTypeOrder;
+  // Operands are reordered in the header.
+  bool IsOperandReordered = false;
   uint8_t RISCVExtensions = 0;
 
 public:
@@ -185,6 +187,7 @@
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
   bool isMask() const { return IsMask; }
+  bool isOperandReordered() const { return IsOperandReordered; }
   size_t getNumOperand() const { return InputTypes.size(); }
   StringRef getIRName() const { return IRName; }
   StringRef getManualCodegen() const { return ManualCodegen; }
@@ -788,6 +791,7 @@
   std::iota(CTypeOrder.begin(), CTypeOrder.end(), 0);
   // Update default order if we need permutate.
   if (!PermuteOperands.empty()) {
+    IsOperandReordered = true;
     // PermuteOperands is nonmasked version index. Update index when there is
     // maskedoff operand which is always in first operand.
@@ -884,6 +888,11 @@
 }
 
 void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
+  bool UseAliasAttr = !isMask() && !isOperandReordered();
+  if (UseAliasAttr) {
+    OS << "__attribute__((clang_builtin_alias(";
+    OS << "__builtin_rvv_" << getName() << ")))\n";
+  }
   OS << OutputType->getTypeStr() << " " << getMangledName() << "(";
   // Emit function arguments
   if (getNumOperand() > 0) {
@@ -891,16 +900,20 @@
     for (unsigned i = 0; i < CTypeOrder.size(); ++i)
       OS << LS << InputTypes[CTypeOrder[i]]->getTypeStr() << " op" << i;
   }
-  OS << "){\n";
-  OS << "  return " << getName() << "(";
-  // Emit parameter variables
-  if (getNumOperand() > 0) {
-    ListSeparator LS;
-    for (unsigned i = 0; i < CTypeOrder.size(); ++i)
-      OS << LS << "op" << i;
+  if (UseAliasAttr) {
+    OS << ");\n\n";
+  } else {
+    OS << "){\n";
+    OS << "  return " << getName() << "(";
+    // Emit parameter variables
+    if (getNumOperand() > 0) {
+      ListSeparator LS;
+      for (unsigned i = 0; i < CTypeOrder.size(); ++i)
+        OS << LS << "op" << i;
+    }
+    OS << ");\n";
+    OS << "}\n\n";
   }
-  OS << ");\n";
-  OS << "}\n\n";
 }
 
 //===----------------------------------------------------------------------===//
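
For reference, the effect of the emitMangledFuncDef() change on the generated riscv_vector.h looks roughly like the following. This is a sketch for a hypothetical non-masked vadd_vv_i8m1 instance (the exact signature, and any surrounding static inline / always_inline attributes produced elsewhere in the emitter, are omitted):

    /* Old output, still emitted when UseAliasAttr is false (masked or
     * operand-reordered intrinsics): a forwarding wrapper whose parameters
     * follow CTypeOrder. */
    vint8m1_t vadd(vint8m1_t op0, vint8m1_t op1, size_t op2){
      return vadd_vv_i8m1(op0, op1, op2);
    }

    /* New output when UseAliasAttr is true: the overloaded name is declared
     * as a direct alias of the underlying builtin, so no wrapper body is
     * needed. */
    __attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1)))
    vint8m1_t vadd(vint8m1_t op0, vint8m1_t op1, size_t op2);

clang_builtin_alias binds the declared function directly to the named builtin without renaming or reordering arguments, which is why the alias path is gated on !isMask() && !isOperandReordered(): masked intrinsics and intrinsics with a permuted C operand order still need the wrapper so the header-level argument order can differ from the builtin's. That split also lines up with the test updates above, where the unmasked overloads drop their trailing attribute-group match while the masked overloads keep it, merely moving to the #[[ATTRn]] spelling.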